<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIRx Med</journal-id><journal-id journal-id-type="publisher-id">xmed</journal-id><journal-id journal-id-type="index">34</journal-id><journal-title>JMIRx Med</journal-title><abbrev-journal-title>JMIRx Med</abbrev-journal-title><issn pub-type="epub">2563-6316</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v6i1e60866</article-id><article-id pub-id-type="doi">10.2196/60866</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Improved Alzheimer Disease Diagnosis With a Machine Learning Approach and Neuroimaging: Case Study Development</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Lazli</surname><given-names>Lilia</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1"/></contrib></contrib-group><aff id="aff1"><institution>Department of Computer and Software Engineering, Polytechnique Montr&#x00E9;al, University of Montreal</institution><addr-line>2500 Chem de Polytechnique</addr-line><addr-line>Montreal</addr-line><addr-line>QC</addr-line><country>Canada</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Hang</surname><given-names>Ching Nam</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name 
name-style="western"><surname>Anonymous</surname><given-names/></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Khani</surname><given-names>Masoud</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Anonymous</surname><given-names/></name></contrib></contrib-group><author-notes><corresp>Correspondence to Lilia Lazli, PhD, Department of Computer and Software Engineering, Polytechnique Montr&#x00E9;al, University of Montreal, 2500 Chem de Polytechnique, Montreal, QC, H3T 1J4, Canada, 1(514) 340-5121 ext 3750; <email>lazlilia28@gmail.com</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>21</day><month>4</month><year>2025</year></pub-date><volume>6</volume><elocation-id>e60866</elocation-id><history><date date-type="received"><day>23</day><month>05</month><year>2024</year></date><date date-type="rev-recd"><day>09</day><month>02</month><year>2025</year></date><date date-type="accepted"><day>10</day><month>02</month><year>2025</year></date></history><copyright-statement>&#x00A9; Lilia Lazli. Originally published in JMIRx Med (<ext-link ext-link-type="uri" xlink:href="https://med.jmirx.org">https://med.jmirx.org</ext-link>), 21.4.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIRx Med, is properly cited. 
The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://med.jmirx.org/">https://med.jmirx.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://xmed.jmir.org/2025/1/e60866"/><related-article related-article-type="companion" ext-link-type="doi" xlink:href="10.48550/arXiv.2405.09553" xlink:title="Preprint (arXiv)" xlink:type="simple">https://arxiv.org/abs/2405.09553v1</related-article><related-article related-article-type="companion" ext-link-type="doi" xlink:href="10.2196/73768" xlink:title="Peer-Review Report by Anonymous" xlink:type="simple">https://med.jmirx.org/2025/1/e73768</related-article><related-article related-article-type="companion" ext-link-type="doi" xlink:href="10.2196/73454" xlink:title="Peer-Review Report by Masoud Khani (Reviewer AS)" xlink:type="simple">https://med.jmirx.org/2025/1/e73454</related-article><related-article related-article-type="companion" ext-link-type="doi" xlink:href="10.2196/73130" xlink:title="Peer-Review Report by Anonymous" xlink:type="simple">https://med.jmirx.org/2025/1/e73130</related-article><related-article related-article-type="companion" ext-link-type="doi" xlink:href="10.2196/72821" xlink:title="Authors' Response to Peer-Review Reports" xlink:type="simple">https://med.jmirx.org/2025/1/e72821</related-article><abstract><sec><title>Background</title><p>Alzheimer disease (AD) is a severe neurological brain disorder. While not curable, earlier detection can help improve symptoms substantially. Machine learning (ML) models are popular and well suited for medical image processing tasks such as computer-aided diagnosis. These techniques can improve the process for an accurate diagnosis of AD.</p></sec><sec><title>Objective</title><p>In this paper, a complete computer-aided diagnosis system for the diagnosis of AD has been presented. 
We investigate the performance of some of the most used ML techniques for AD detection and classification using neuroimages from the Open Access Series of Imaging Studies (OASIS) and Alzheimer&#x2019;s Disease Neuroimaging Initiative (ADNI) datasets.</p></sec><sec sec-type="methods"><title>Methods</title><p>The system uses artificial neural networks (ANNs) and support vector machines (SVMs) as classifiers, and dimensionality reduction techniques as feature extractors. To retrieve features from the neuroimages, we used principal component analysis (PCA), linear discriminant analysis, and t-distributed stochastic neighbor embedding. These features are fed into feedforward neural networks (FFNNs) and SVM-based ML classifiers. Furthermore, we applied the vision transformer (ViT)&#x2013;based ANNs in conjunction with data augmentation to distinguish patients with AD from healthy controls.</p></sec><sec sec-type="results"><title>Results</title><p>Experiments were performed on magnetic resonance imaging and positron emission tomography scans. The OASIS dataset included a total of 300 patients, while the ADNI dataset included 231 patients. For OASIS, 90 (30%) patients were healthy and 210 (70%) were severely impaired by AD. Likewise for the ADNI database, a total of 149 (64.5%) patients with AD were detected and 82 (35.5%) patients were used as healthy controls. An important difference was established between healthy patients and patients with AD (<italic>P</italic>=.02). We examined the effectiveness of the three feature extractors and classifiers using 5-fold cross-validation and confusion matrix&#x2013;based standard classification metrics, namely, accuracy, sensitivity, specificity, precision, <italic>F</italic><sub>1</sub>-score, and area under the receiver operating characteristic curve (AUROC). 
Compared with the state-of-the-art performing methods, the success rate was satisfactory for all the created ML models, but SVM and FFNN performed best with the PCA extractor, while the ViT classifier performed best with more data. The data augmentation/ViT approach worked better overall, achieving accuracies of 93.2% (sensitivity=87.2, specificity=90.5, precision=87.6, <italic>F</italic><sub>1</sub>-score=88.7, and AUROC=92) for OASIS and 90.4% (sensitivity=85.4, specificity=88.6, precision=86.9, <italic>F</italic><sub>1</sub>-score=88, and AUROC=90) for ADNI.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Effective ML models using neuroimaging data could help physicians working on AD diagnosis and will assist them in prescribing timely treatment to patients with AD. Good results were obtained on the OASIS and ADNI datasets with all the proposed classifiers, namely, SVM, FFNN, and ViTs. However, the results show that the ViT model is much better at predicting AD than the other models when a sufficient amount of data are available to perform the training. 
This highlights that the data augmentation process could impact the overall performance of the ViT model.</p></sec></abstract><kwd-group><kwd>Alzheimer disease</kwd><kwd>computer-aided diagnosis system</kwd><kwd>machine learning</kwd><kwd>principal component analysis</kwd><kwd>linear discriminant analysis</kwd><kwd>t-distributed stochastic neighbor embedding</kwd><kwd>feedforward neural network</kwd><kwd>vision transformer architecture</kwd><kwd>support vector machines</kwd><kwd>magnetic resonance imaging</kwd><kwd>positron emission tomography imaging</kwd><kwd>Open Access Series of Imaging Studies</kwd><kwd>Alzheimer's Disease Neuroimaging Initiative</kwd><kwd>OASIS</kwd><kwd>ADNI</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Alzheimer disease (AD) is a progressive degenerative brain disorder that gradually destroys memory, reason, judgment, language, and ultimately the ability to perform even the simplest of tasks [<xref ref-type="bibr" rid="ref1">1</xref>]. An automated AD classification system is crucial for the early detection of disease. This computer-aided diagnosis (CAD) system can help expert clinicians prescribe the proper treatment and prevent brain tissue damage [<xref ref-type="bibr" rid="ref1">1</xref>].</p><p>In the last decades, researchers have developed several CAD systems [<xref ref-type="bibr" rid="ref1">1</xref>-<xref ref-type="bibr" rid="ref5">5</xref>]. Rule-based expert systems were developed from the 1970s to the 1990s and supervised models from the 1990s [<xref ref-type="bibr" rid="ref1">1</xref>]. Moreover, several approaches have been proposed in the literature aiming at providing an automatic tool that guides the clinician in the AD diagnosis process [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref5">5</xref>-<xref ref-type="bibr" rid="ref7">7</xref>]. 
We can categorize these approaches into two types: univariate approaches, like statistical parametric mapping (SPM), and multivariate approaches, like the voxels-as-features (VAF) approach.</p><p>Due to advances in computing power, machine learning (ML) has encompassed many health care sectors and has shown results with organ and substructure segmentation as well as disease classifications in areas of pathology, brain, breast, bone, retina, etc. Open-access datasets on AD have led to the development of CAD systems that use ML to help scientists and medical staff make early diagnoses. These systems will ultimately help speed up the treatment of patients with AD. To make predictions, scientists have adopted various ML-based classifiers, including support vector machines (SVMs) [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>], hidden Markov models [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>], <italic>k</italic>-nearest neighbors classifier [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref13">13</xref>], discriminant analysis [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref15">15</xref>], random forest [<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref17">17</xref>], decision trees [<xref ref-type="bibr" rid="ref18">18</xref>], naive Bayes classifier [<xref ref-type="bibr" rid="ref19">19</xref>,<xref ref-type="bibr" rid="ref20">20</xref>], and artificial neural networks (ANNs) [<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref22">22</xref>].</p><p>Despite the efforts of researchers, there have been few works on AD detection using ML models that have had significant performance, and the development of an automated AD classification model remains a rather challenging task. 
Within this framework of distinguishing between healthy controls (HCs) and people with AD, the main contributions of this paper can be summarized as follows.</p><list list-type="bullet"><list-item><p>We developed a CAD system using the best-supervised learning classifiers, such as SVMs [<xref ref-type="bibr" rid="ref8">8</xref>,<xref ref-type="bibr" rid="ref9">9</xref>], feedforward neural networks (FFNNs) [<xref ref-type="bibr" rid="ref23">23</xref>], and transformer neural networks, especially the vision transformer (ViT) architecture [<xref ref-type="bibr" rid="ref24">24</xref>], which is becoming more popular in the field of computer vision due to its effectiveness.</p></list-item><list-item><p>We designed these models to analyze the two neuroimages commonly used in AD diagnosis, namely, structural magnetic resonance imaging (sMRI) and fluorodeoxyglucose (FDG)&#x2013;positron emission tomography (PET) as these modalities are the preeminent sources of information in the CAD process.</p></list-item><list-item><p>The multimodal CAD system uses principal component analysis (PCA) [<xref ref-type="bibr" rid="ref25">25</xref>] in conjunction with SVM and FFNN, training them on the PCA features extracted from the neurological images.</p></list-item><list-item><p>The most challenging datasets, namely the Open Access Series of Imaging Studies (OASIS) [<xref ref-type="bibr" rid="ref26">26</xref>] and Alzheimer&#x2019;s Disease Neuroimaging Initiative (ADNI) [<xref ref-type="bibr" rid="ref27">27</xref>] datasets, underwent rigorous tests using various experimental settings. 
These experiments validated the effectiveness of the chosen models, showcasing their superiority over state-of-the-art approaches in terms of accuracy, sensitivity, specificity, precision, <italic>F</italic><sub>1</sub>-score, and area under the receiver operating characteristic curve (AUROC).</p></list-item></list></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Participants</title><p>Sometimes we found signs of AD in the brain data of healthy and older patients, so considerable experience and knowledge were essential to distinguish the AD data from the HC patients&#x2019; data. In this context, we have experimented the performance of the proposed CAD system on the OASIS [<xref ref-type="bibr" rid="ref26">26</xref>] and ADNI [<xref ref-type="bibr" rid="ref27">27</xref>] datasets.</p><sec id="s2-1-1"><title>OASIS Dataset</title><p>The OASIS dataset [<xref ref-type="bibr" rid="ref26">26</xref>] was prepared by Dr Randy Buckner from the Howard Hughes Medical Institute at Harvard University, the Neuroinformatics Research Group at Washington University School of Medicine, and the Biomedical Informatics Research Network. OASIS is a longitudinal multimodal neuroimaging, clinical, cognitive, and biomarker dataset for normal aging and AD. We selected the patients with and without dementia from a larger database and obtained them from the longitudinal pool of the Washington University Alzheimer Disease Research Center. The experiment used a dataset that included 90 cognitively normal patients and 210 individuals with AD. The AD group included very mild, mild, moderate, and severe dementia.</p></sec><sec id="s2-1-2"><title>ADNI Dataset</title><p>The ADNI dataset [<xref ref-type="bibr" rid="ref27">27</xref>], which is the most commonly used in machine learning tasks, is an association of medical centers and universities located in the United States and Canada. 
ADNI is funded by the National Institute on Aging and the National Institute of Biomedical Imaging and Bioengineering, and through generous contributions from the following: AbbVie; Alzheimer&#x2019;s Association; Alzheimer&#x2019;s Drug Discovery Foundation; Araclon Biotech; BioClinica, Inc; Biogen; Bristol-Myers Squibb Company; CereSpir, Inc; Cogstate; Eisai Co., Ltd; Elan Pharmaceuticals, Inc; Eli Lilly and Company; EUROIMMUN; F. Hoffmann-La Roche Ltd and its affiliated company Genentech, Inc; Fujirebio; GE HealthCare; IXICO plc; Janssen Alzheimer Immunotherapy Research &#x0026; Development, LLC; Johnson &#x0026; Johnson Pharmaceutical Research &#x0026; Development, LLC; Lumosity; Lundbeck; Merck &#x0026; Co., Inc; Meso Scale Diagnostics LLC; NeuroRx Research; Neurotrack Technologies; Novartis Pharmaceuticals Corporation; Pfizer Inc; Piramal Imaging; Servier; Takeda Pharmaceutical Company; and Transition Therapeutics. The Canadian Institutes of Health Research is providing funds to support ADNI clinical sites in Canada. Private sector contributions are facilitated by the Foundation for the National Institutes of Health. The grantee organization is the Northern California Institute for Research and Education, and the study is coordinated by the Alzheimer&#x2019;s Therapeutic Research Institute at the University of Southern California. ADNI data are disseminated by the Laboratory for Neuroimaging at the University of Southern California.</p><p>The main aim of ADNI is to provide open-source datasets to discover biomarkers and identify and track the progression of AD accurately. It developed to become an ideal source of longitudinal multisite PET and magnetic resonance imaging (MRI) images of patients with AD and older control patients (HC). The datasets were formed to make the detection system powerful by providing baseline information regarding changes in brain structure and metabolism, as well as clinical, cognitive, and biochemical data. 
The ADNI cohort used in our study included 82 cognitively normal patients and 149 patients with AD. The AD group included patients with mild cognitive impairment and those with confirmed AD.</p></sec></sec><sec id="s2-2"><title>Ethical Considerations</title><p>This work used two datasets (ADNI and OASIS), which are available in the public domain. For the benchmark ADNI dataset, the terms of use are declared on their website [<xref ref-type="bibr" rid="ref28">28</xref>]. All patients in the ADNI database provided written informed consent, which was approved by the institutional review board of each participating institution. Patients were informed that their information would be kept confidential and their data would be anonymous and would be part of scientific publications.</p><p>According to local legislation and institutional requirements, the study of human participants using the OASIS dataset does not require ethical review and approval [<xref ref-type="bibr" rid="ref26">26</xref>]. Written informed consent from the patients&#x2019; legal guardians or next of kin was not required to participate in this study in accordance with national legislation and institutional requirements [<xref ref-type="bibr" rid="ref26">26</xref>]. The data used for the analysis has been deidentified and made public.</p></sec><sec id="s2-3"><title>Data Preparation</title><p>We performed the following steps on the OASIS and ADNI neuroimages: normalization, resizing, removing nonbrain slices, selecting slices with the most information, and converting 3D images into 2D slices. First, the damaged original files containing the images were removed. We selected a larger number of central slices to aid the CAD system in accurately classifying AD. We used an SPM tool (SPM8 [<xref ref-type="bibr" rid="ref29">29</xref>]), which is a major update to SPM software, originally developed by Karl Friston, to partially correct spatial intensity inhomogeneities. 
This software normalized all the images using a general affine model with 12 parameters. The origin of the raw sMRI scans was set manually to anterior commissure before manually registering them with SPM&#x2019;s canonical T1 template image. We applied the nonparametric nonuniform intensity normalization (N3) technique to solve the tissue intensity nonuniformity problem [<xref ref-type="bibr" rid="ref30">30</xref>]. Then the hybrid median filter was used to remove impulse noise while preserving edges.</p></sec><sec id="s2-4"><title>ML Approaches</title><sec id="s2-4-1"><title>Overview</title><p>A generic automated AD detection and classification framework is summarized in <xref ref-type="fig" rid="figure1">Figure 1</xref>. ML classifiers aim to predict the class of the input data (images of patients with AD or healthy patients) by looking at a number of learning examples. The process begins with the preprocessing of sMRI and FDG-PET images to keep only relevant data. Then each image is represented by grayscale features and is collapsed into a new feature space by applying PCA-based feature extraction to pick the optimal features. After that, to classify the patients, these selected features are fed to the supervised learner. In this work, SVMs and FFNNs are learned on the PCA features extracted from the neuroimages. While for ViT, we applied the data augmentation strategy [<xref ref-type="bibr" rid="ref31">31</xref>], since the training of this network required more data compared to the other two classifiers. For PCA, a performance comparison was made with similar techniques, t-distributed stochastic neighbor embedding (t-SNE) [<xref ref-type="bibr" rid="ref32">32</xref>] and linear discriminant analysis (LDA) [<xref ref-type="bibr" rid="ref14">14</xref>].</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Block diagram of a generic Alzheimer disease computer-aided diagnosis system. 
ADNI: Alzheimer&#x2019;s Disease Neuroimaging Initiative; DL: deep learning; FFNN: feedforward neural network; LDA: linear discriminant analysis; ML: machine learning; MRI: magnetic resonance imaging; OASIS: Open Access Series of Imaging Studies; PCA: principal component analysis; PET: positron emission tomography; SVM: support vector machine; t-SNE: t-distributed stochastic neighbor embedding; ViT: vision transformer.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="xmed_v6i1e60866_fig01.png"/></fig><p>Below is a summary description of the four approaches proposed for our CAD system, and more details on the mathematical background of these approaches can be found in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> for PCA, <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref> for SVM, <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref> for FFNN, and <xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref> for ViT.</p></sec><sec id="s2-4-2"><title>Principal Component Analysis</title><p>PCA is a linear dimensionality reduction method used widely in data preprocessing and exploratory analysis. Different image classification purposes have successfully used PCA because its method is nonparametric and easy to apply, and helps extract useful information from confusing datasets [<xref ref-type="bibr" rid="ref25">25</xref>].</p><p>In this study, we used this technique to extract useful features for classifiers. PCA allows the production of new variables that represent linear combinations of the original variables. Using linear algebra and matrix operations, a transformation is performed from the original dataset to a new coordinate system structured by the principal components. The analysis of this linear transformation is obtained thanks to the eigenvectors and the eigenvalues of the covariance matrix. 
The PCA steps are summarized as follows: (1) standardize the range of continuous initial variables, (2) find correlations by computing the covariance matrix, (3) find the eigenvectors and eigenvalues of the covariance matrix, (4) choose the principal components, and (5) change the data to the new coordinate system. More details about the PCA computation process with mathematical formulas are explained in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p></sec><sec id="s2-4-3"><title>Support Vector Machines</title><p>We used SVMs as classifiers for the classification of independent and identically distributed data [<xref ref-type="bibr" rid="ref23">23</xref>]. These machines are widely used as supervised max-margin models, along with associated learning algorithms that analyze data. To distinguish two classes, the principle of SVMs is to seek the optimal hyperplane that allows for maximizing the margin between the closest data points of the opposite classes.</p><p>The SVM algorithm for linear classification is widely used in ML. However, in this study, we used SVMs to perform nonlinear classification due to the data&#x2019;s nonlinear separability. We achieved this by applying a kernel function to represent the data as a set of pairwise similarity comparisons between the original data points.</p><p>This function transforms the original data points into coordinates in a higher-dimensional feature space, thereby facilitating linear separation. <xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref> provides further details about the SVM computation process, including mathematical formulas.</p></sec><sec id="s2-4-4"><title>Feedforward Neural Network</title><p>Biological nervous systems, such as the brain, inspire the information-processing paradigm of FFNN, which is one of the two main types of ANNs [<xref ref-type="bibr" rid="ref23">23</xref>]. 
The distinctive feature of this network is the unidirectional flow of information, meaning that the information flow in the model is only in one direction&#x2014;forward&#x2014;without any loops or cycles. Information flows from the input nodes through the hidden nodes and to the output nodes.</p><p>This network is static and memoryless. Given a data input, FFNN provides a single set of output values instead of a sequence of values. Furthermore, the response produced for an input is independent of the previous state of the network. FFNN automatically learns from examples and uses a backpropagation learning algorithm for determining weights. More details about the FFNN computation process with mathematical foundations are explained in <xref ref-type="supplementary-material" rid="app3">Multimedia Appendix 3</xref>.</p></sec><sec id="s2-4-5"><title>Transformers</title><p>Transformers, which dominate natural language processing, have acquired a reputation in computer vision owing to their positive results in many applications such as semantic segmentation, object detection, and image classification. Transformer architecture entirely relies on an attention mechanism to produce global dependencies between input and output, avoiding recurrence. Self-attention assesses the sequence representation by connecting various positions within a single sequence.</p><p>In this work, we applied a ViT architecture [<xref ref-type="bibr" rid="ref24">24</xref>] to neuroimages with very little adjustment, demonstrating better performance in numerous computer-vision tasks. ViT uses a multiheaded self-attention mechanism to catch and learn long-range dependencies between distant positions by averaging attention-weighted positions. This promotes the network&#x2019;s focus on all of the data of the input sequence. This characteristic encourages us to use ViT for our brain imaging study owing to its capacity to precisely catch interdependencies between dispersed brain regions. 
More details about the ViT computation process with mathematical foundations are explained in <xref ref-type="supplementary-material" rid="app4">Multimedia Appendix 4</xref>.</p><p>Nevertheless, the learning dataset is too small, whereas substantial data are required to train a ViT from scratch. In this regard, we used data augmentation to expand the size of the input data by creating additional data from the original input data. To create new images, we performed some geometric transformations. The visual transformation primarily focuses on translating, flipping random images horizontally, rotating them at 15 angles without cropping, and rescaling the input data to the range of [0, 1].</p></sec></sec><sec id="s2-5"><title>Statistical Analysis</title><p>We have carried out the performance assessment and the comparison of the classifiers using typical confusion matrix&#x2013;based evaluation metrics. The confusion matrix has the elements of true positive (TP), false positive (FP), false negative (FN), and true negative (TN). Each column of the matrix indicates an instance of the predicted class, and each row contains a true (correct or actual) class. The following are the metrics used to evaluate the performance of the CAD system.</p><p>Sensitivity&#x2014;also known as recall&#x2014;is used for calculating the classifier&#x2019;s ability to correctly predict Alzheimer instances (AD class). On the other hand, the classifier uses specificity to accurately predict all non-Alzheimer instances (HC class) across all inputs.</p><p>A classifier should have high sensitivity and specificity. Therefore, the accuracy metric, which calculates the number of correctly classified instances relative to the total number of instances, balances these two measures. 
The precision metric measures the classifier&#x2019;s ability to quantify the number of TPs of the AD class that receive a correct label in classification.</p><p>The combined harmonic mean of both sensitivity and precision gives the <italic>F</italic><sub>1</sub>-score, which takes a value between 0 and 1. The receiver operating characteristic curve, a method for visualizing a classifier&#x2019;s ability to diagnose or predict correctly, clearly illustrates the trade-off that arises between the sensitivity and specificity metrics. At various thresholds, the receiver operating characteristic curve plots the TP rate or sensitivity against the FP rate (1 &#x2013; specificity).</p><p>We aim to determine the degree of separability, or the ability to correctly predict class, using the AUROC. The higher the AUROC, the better; 1 would be perfect, and 0.5 would be random. Accuracy, sensitivity, specificity, precision, <italic>F</italic><sub>1</sub>-score, and AUROC are the six main metrics used to assess the efficacy of each classifier. 
The following are the mathematical formulas for the first five metrics.</p><disp-formula id="E1"><label>(1)</label><mml:math id="eqn1"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>A</mml:mi><mml:mi>c</mml:mi><mml:mi>c</mml:mi><mml:mi>u</mml:mi><mml:mi>r</mml:mi><mml:mi>a</mml:mi><mml:mi>c</mml:mi><mml:mi>y</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>+</mml:mo><mml:mi>T</mml:mi><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>+</mml:mo><mml:mi>F</mml:mi><mml:mi>P</mml:mi><mml:mo>+</mml:mo><mml:mi>F</mml:mi><mml:mi>N</mml:mi><mml:mo>+</mml:mo><mml:mi>T</mml:mi><mml:mi>N</mml:mi></mml:mrow></mml:mfrac></mml:mrow></mml:mstyle></mml:math></disp-formula><disp-formula id="E2"><label>(2)</label><mml:math id="eqn2"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>S</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>t</mml:mi><mml:mi>i</mml:mi><mml:mi>v</mml:mi><mml:mi>i</mml:mi><mml:mi>t</mml:mi><mml:mi>y</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>+</mml:mo><mml:mi>F</mml:mi><mml:mi>N</mml:mi></mml:mrow></mml:mfrac></mml:mrow></mml:mstyle></mml:math></disp-formula><disp-formula id="E3"><label>(3)</label><mml:math id="eqn3"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>S</mml:mi><mml:mi>p</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>f</mml:mi><mml:mi>i</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>t</mml:mi><mml:mi>y</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>T</mml:mi><mml:mi>N</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mi>N</mml:mi><mml:mo>+</mml:mo><mml:mi>F</mml:mi><mml:mi>P</mml:mi></mml:mrow></mml:mfrac></mml:mrow></mml:mstyle></mml:math></disp-formula><disp-formula id="E4"><label>(4)</label><mml:math id="eqn4"><mml:mstyle displaystyle="true" 
scriptlevel="0"><mml:mrow><mml:mi>P</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi></mml:mrow><mml:mrow><mml:mi>T</mml:mi><mml:mi>P</mml:mi><mml:mo>+</mml:mo><mml:mi>F</mml:mi><mml:mi>P</mml:mi></mml:mrow></mml:mfrac></mml:mrow></mml:mstyle></mml:math></disp-formula><disp-formula id="E5"><label>(5)</label><mml:math id="eqn5"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>F</mml:mi><mml:mn>1</mml:mn><mml:mo>&#x2212;</mml:mo><mml:mi>s</mml:mi><mml:mi>c</mml:mi><mml:mi>o</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mo>=</mml:mo><mml:mn>2</mml:mn><mml:mfrac><mml:mrow><mml:mi>P</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mo>&#x00D7;</mml:mo><mml:mi>S</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>t</mml:mi><mml:mi>i</mml:mi><mml:mi>v</mml:mi><mml:mi>i</mml:mi><mml:mi>t</mml:mi><mml:mi>y</mml:mi></mml:mrow><mml:mrow><mml:mi>P</mml:mi><mml:mi>r</mml:mi><mml:mi>e</mml:mi><mml:mi>c</mml:mi><mml:mi>i</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>o</mml:mi><mml:mi>n</mml:mi><mml:mo>+</mml:mo><mml:mi>S</mml:mi><mml:mi>e</mml:mi><mml:mi>n</mml:mi><mml:mi>s</mml:mi><mml:mi>i</mml:mi><mml:mi>t</mml:mi><mml:mi>i</mml:mi><mml:mi>v</mml:mi><mml:mi>i</mml:mi><mml:mi>t</mml:mi><mml:mi>y</mml:mi></mml:mrow></mml:mfrac></mml:mrow></mml:mstyle></mml:math></disp-formula></sec></sec><sec id="s3" sec-type="results"><title>Results</title><p>We experimented the performance of the proposed CAD system on patients&#x2019; images from the OASIS [<xref ref-type="bibr" rid="ref26">26</xref>] and ADNI [<xref ref-type="bibr" rid="ref27">27</xref>] datasets. 
These datasets contain sMRI and FDG-PET scans along with information about the patients&#x2019; demographics and clinical assessments. There are 300 patients for OASIS and 231 patients for ADNI whose age was between 18 and 96 years, and each patient had 3 or 4 accessible PET and T1-weighted MRI scans. <xref ref-type="table" rid="table1">Tables 1</xref> and <xref ref-type="table" rid="table2">2</xref> provide more details on the demographic and clinical characteristics of participants.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>The demographic information (gender, race, class, right-handed) of participants.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Variable</td><td align="left" valign="bottom">OASIS<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup> patients (n=300), n (%)</td><td align="left" valign="bottom">ADNI<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup> patients (n=231), n (%)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="3">Gender</td></tr><tr><td align="left" valign="top">&#x2003;Women</td><td align="left" valign="top">80 (26.7)</td><td align="left" valign="top">99 (42.9)</td></tr><tr><td align="left" valign="top">&#x2003;Men</td><td align="left" valign="top">220 (73.3)</td><td align="left" valign="top">132 (57.1)</td></tr><tr><td align="left" valign="top" colspan="3">Race</td></tr><tr><td align="left" valign="top">&#x2003;Caucasian</td><td align="left" valign="top">174 (58.0)</td><td align="left" valign="top">159 (68.8)</td></tr><tr><td align="left" valign="top">&#x2003;African-American</td><td align="left" valign="top">122 (40.7)</td><td align="left" valign="top">70 (30.3)</td></tr><tr><td align="left" valign="top">&#x2003;Asian</td><td align="left" valign="top">4 (1.3)</td><td align="left" valign="top">2 (0.9)</td></tr><tr><td align="left" valign="top" colspan="3">Class</td></tr><tr><td align="left" 
valign="top">&#x2003;Alzheimer</td><td align="left" valign="top">210 (70.0)</td><td align="left" valign="top">149 (64.5)</td></tr><tr><td align="left" valign="top">&#x2003;Healthy</td><td align="left" valign="top">90 (30.0)</td><td align="left" valign="top">82 (35.5)</td></tr><tr><td align="left" valign="top" colspan="3">Right-handed</td></tr><tr><td align="left" valign="top">&#x2003;Women</td><td align="left" valign="top">77 (96.3)</td><td align="left" valign="top">93 (93.9)</td></tr><tr><td align="left" valign="top">&#x2003;Men</td><td align="left" valign="top">219 (99.5)</td><td align="left" valign="top">130 (98.5)</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>OASIS: Open Access Series of Imaging Studies.</p></fn><fn id="table1fn2"><p><sup>b</sup>ADNI: Alzheimer&#x2019;s Disease Neuroimaging Initiative.</p></fn></table-wrap-foot></table-wrap><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>The demographic characteristics and clinical assessment data in terms of age, education, mini-mental state examination, and Alzheimer&#x2019;s Disease Assessment Scale&#x2013;Cognitive subscale.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Variable</td><td align="left" valign="bottom">OASIS<sup><xref ref-type="table-fn" rid="table2fn1">d</xref></sup> patients, mean (SD; range)</td><td align="left" valign="bottom">ADNI<sup><xref ref-type="table-fn" rid="table2fn2">e</xref></sup> patients, mean (SD; range)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="3">Age (years)</td></tr><tr><td align="left" valign="top">&#x2003;Women</td><td align="left" valign="top">67.78 (43.2&#x2010;95.6)</td><td align="left" valign="top">75.3 (5.2)</td></tr><tr><td align="left" valign="top">&#x2003;Men</td><td align="left" valign="top">70.17 (42.5&#x2010;91.7)</td><td align="left" valign="top">75.4 (7.1)</td></tr><tr><td align="left" valign="top" 
colspan="3">Education</td></tr><tr><td align="left" valign="top">&#x2003;Women</td><td align="left" valign="top">14.3 (1.6; 9-18)</td><td align="left" valign="top">15.6 (3.2)</td></tr><tr><td align="left" valign="top">&#x2003;Men</td><td align="left" valign="top">15.2 (2.7; 8-23)</td><td align="left" valign="top">14.9 (3.4)</td></tr><tr><td align="left" valign="top" colspan="3">Mini-mental state examination<sup><xref ref-type="table-fn" rid="table2fn3">f</xref></sup></td></tr><tr><td align="left" valign="top">&#x2003;Baseline (women)</td><td align="left" valign="top">25.4 (0.4; 22-26)</td><td align="left" valign="top">29.0 (1.2; 19-26)</td></tr><tr><td align="char" char="hyphen" valign="top">&#x2003;2 years (women)</td><td align="left" valign="top">&#x2014;<sup><xref ref-type="table-fn" rid="table2fn4">g</xref></sup></td><td align="left" valign="top">29.0 (1.3)</td></tr><tr><td align="left" valign="top">&#x2003;Baseline (men)</td><td align="left" valign="top">23.8 (1.9; 25-29)</td><td align="left" valign="top">23.8 (1.9; 25&#x2013;29)</td></tr><tr><td align="char" char="." valign="top">&#x2003;2 years (men)</td><td align="left" valign="top">19.3 (5.6)</td><td align="left" valign="top">29.0 (1.2; 19-26)</td></tr><tr><td align="left" valign="top" colspan="3">Alzheimer&#x2019;s Disease Assessment Scale&#x2013;Cognitive subscale<sup><xref ref-type="table-fn" rid="table2fn5">h</xref></sup></td></tr><tr><td align="left" valign="top">&#x2003;Baseline (women)</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">7.3 (3.3)</td></tr><tr><td align="char" char="hyphen" valign="top">&#x2003;2 years (women)</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">6.3 (3.5)</td></tr><tr><td align="left" valign="top">&#x2003;Baseline (men)</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">7.3 (3.3)</td></tr><tr><td align="char" char="." 
valign="top">&#x2003;2 years (men)</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">27.3 (11.7)</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>d</sup>OASIS: Open Access Series of Imaging Studies.</p></fn><fn id="table2fn2"><p><sup>e</sup>ADNI: Alzheimer&#x2019;s Disease Neuroimaging Initiative.</p></fn><fn id="table2fn3"><p><sup>f</sup>The mini-mental state examination has a possible score range of 0-30.</p></fn><fn id="table2fn4"><p><sup>g</sup>Not available.</p></fn><fn id="table2fn5"><p><sup>h</sup>The Alzheimer&#x2019;s Disease Assessment Scale&#x2013;Cognitive subscale has a possible score range of 0-30.</p></fn></table-wrap-foot></table-wrap><p>We used a clinical dementia rating scale to control the dementia status of the dataset; a score of 0 on the scale indicates a normal cognitive level, while a score greater than 0 determines the presence of AD. In this context, we divided the images into 210 (70%) patients with AD and 90 (30%) HCs for the OASIS dataset and 149 (64.5%) patients with AD and 82 (35.5%) HCs for the ADNI dataset. The majority of the samples were identified as men, specifically 220 (73%) for OASIS and 132 (57%) for ADNI, while the majority of the samples were Caucasian, specifically 174 (58%) for OASIS and 159 (69%) for ADNI.</p><p>After the preprocessing steps, each slice of sMRI includes 256 &#x00D7; 256 &#x00D7; 176 voxels covering the entire region of the brain with the following parameters: voxel size is 2 &#x00D7; 2 &#x00D7; 2 mm<sup>3</sup> for ADNI and 2 &#x00D7; 3.1 &#x00D7; 2 mm<sup>3</sup> for OASIS, isotropic resolution is 1.0 mm, time of repetition is 5050 milliseconds, and time of echo is 10 milliseconds. 
All slices of reconstructed PET images are resampled to contain 256 &#x00D7; 256 &#x00D7; 207 voxels with a voxel size of 1.2 &#x00D7; 1.2 &#x00D7; 1.2 mm<sup>3</sup>.</p><p>The appropriate hyperparameter values for the classifiers were chosen by reviewing prior state-of-the-art work and after doing empirical testing and exploratory analyses. Some of the hyperparameters used in the experiment are presented in <xref ref-type="table" rid="table3">Table 3</xref>.</p><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>The hyperparameter tuning and classifiers configuration used in the experiment.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Hyperparameter</td><td align="left" valign="bottom">Search range</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="2">Support vector machine</td></tr><tr><td align="left" valign="top">&#x2003;Multiclass method</td><td align="left" valign="top">One-vs-one (one-vs-all, one-vs-one)</td></tr><tr><td align="left" valign="top">&#x2003;Penalty parameter of error</td><td align="left" valign="top">0.001 (0.0001, 0.001, 0.01, 0.1)</td></tr><tr><td align="left" valign="top">&#x2003;Box constraint level</td><td align="left" valign="top">1 (0.001&#x2010;1000)</td></tr><tr><td align="left" valign="top">&#x2003;Kernel function</td><td align="left" valign="top">Gaussian (Gaussian, linear, quadratic, cubic)</td></tr><tr><td align="left" valign="top">&#x2003;Kernel scale</td><td align="left" valign="top">2.8</td></tr><tr><td align="left" valign="top">&#x2003;Iteration</td><td align="left" valign="top">30</td></tr><tr><td align="left" valign="top">&#x2003;Standardize data</td><td align="left" valign="top">True</td></tr><tr><td align="left" valign="top" colspan="2">Feedforward neural network</td></tr><tr><td align="left" valign="top">&#x2003;Number of fully connected layers</td><td align="left" valign="top">1</td></tr><tr><td align="left" 
valign="top">&#x2003;First layer size</td><td align="left" valign="top">100</td></tr><tr><td align="left" valign="top">&#x2003;Activation</td><td align="left" valign="top">Hyperbolic tangent sigmoid</td></tr><tr><td align="left" valign="top">&#x2003;Learning function</td><td align="left" valign="top">Gradient descent with momentum weight and bias</td></tr><tr><td align="left" valign="top">&#x2003;Iteration limit</td><td align="left" valign="top">1000</td></tr><tr><td align="left" valign="top">&#x2003;Regularizarion strength (&#x03BB;)</td><td align="left" valign="top">0</td></tr><tr><td align="left" valign="top">&#x2003;Update of weight and bias</td><td align="left" valign="top">Levenberg-Marquardt optimization</td></tr><tr><td align="left" valign="top">&#x2003;Standardize data</td><td align="left" valign="top">True</td></tr><tr><td align="left" valign="top" colspan="2">Vision transformer</td></tr><tr><td align="left" valign="top">&#x2003;Layers</td><td align="left" valign="top">12</td></tr><tr><td align="left" valign="top">&#x2003;Hidden size D</td><td align="left" valign="top">768</td></tr><tr><td align="left" valign="top">&#x2003;Multilayer perceptron size</td><td align="left" valign="top">3072</td></tr><tr><td align="left" valign="top">&#x2003;Heads</td><td align="left" valign="top">12</td></tr><tr><td align="left" valign="top">&#x2003;Parameters</td><td align="left" valign="top">86 million</td></tr><tr><td align="left" valign="top">&#x2003;Path resolution</td><td align="left" valign="top">16 &#x00D7; 16</td></tr></tbody></table></table-wrap><p>For training and testing, 5-fold cross-validation was achieved on each dataset. For each fold, 70% of the data was used for training, 10% for validation, and 20% for testing the effectiveness of each classifier. We conducted experiments on SVM and FFNN using four dimensionality reduction techniques (VAF, LDA, t-SNE, and PCA), as well as on the ViT classifier, without and with data augmentation. 
During the training process, SVM and FFNN achieved the best results with PCA for the validation data, while the ViT classifier achieved the best results with augmented data.</p><p>For the test data, we obtained for the OASIS dataset an accuracy of 91.9% (prediction speed ~2000 observations/second, training time 1.5703 seconds) for SVM, 88.2% (prediction speed ~6000 observations/second, training time 7.7715 seconds) for FFNN, and 93.2% (prediction speed ~7000 observations/second, training time 102.3529 seconds) for ViT. A similar trend was observed for the ADNI data, with an accuracy of 88.6% for SVM (prediction speed ~1300 observations/second, training time 1.4280 seconds), 80.9% for FFNN (prediction speed ~5300 observations/second, training time 8.2319 seconds), and 90.4% for ViT (prediction speed ~7200 observations/second, training time 129.4531 seconds). <xref ref-type="table" rid="table4">Tables 4</xref> and <xref ref-type="table" rid="table5">5</xref> provide further details about the top classification results achieved with the proposed ML classifiers for the OASIS and ADNI datasets, respectively, based on six metrics.</p><table-wrap id="t4" position="float"><label>Table 4.</label><caption><p>Five-fold cross-validation performance for the Open Access Series of Imaging Studies test data in terms of accuracy, sensitivity, specificity, precision, <italic>F</italic><sub>1</sub>-score, and area under the receiver operating characteristic curve (AUROC).</p></caption><table id="table4" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Classifier</td><td align="left" valign="bottom">Accuracy (%)</td><td align="left" valign="bottom">Sensitivity (%)</td><td align="left" valign="bottom">Specificity (%)</td><td align="left" valign="bottom">Precision (%)</td><td align="left" valign="bottom"><italic>F</italic><sub>1</sub>-score (%)</td><td align="left" valign="bottom">AUROC (%)</td></tr></thead><tbody><tr><td align="left" valign="top" 
colspan="7">Support vector machine</td></tr><tr><td align="left" valign="top">&#x2003;VAF<sup><xref ref-type="table-fn" rid="table4fn1">a</xref></sup></td><td align="left" valign="top">66.3</td><td align="left" valign="top">61.3</td><td align="left" valign="top">62.1</td><td align="left" valign="top">65.1</td><td align="left" valign="top">52.4</td><td align="left" valign="top">60</td></tr><tr><td align="left" valign="top">&#x2003;LDA<sup><xref ref-type="table-fn" rid="table4fn2">b</xref></sup></td><td align="left" valign="top">75.6</td><td align="left" valign="top">70.1</td><td align="left" valign="top">69</td><td align="left" valign="top">70.6</td><td align="left" valign="top">68.7</td><td align="left" valign="top">72</td></tr><tr><td align="left" valign="top">&#x2003;t-SNE<sup><xref ref-type="table-fn" rid="table4fn3">c</xref></sup></td><td align="left" valign="top">80.2</td><td align="left" valign="top">74.5</td><td align="left" valign="top">72.4</td><td align="left" valign="top">71.4</td><td align="left" valign="top">70.1</td><td align="left" valign="top">73</td></tr><tr><td align="left" valign="top">&#x2003;PCA<sup><xref ref-type="table-fn" rid="table4fn4">d</xref></sup></td><td align="left" valign="top"><italic>91.9</italic><sup><xref ref-type="table-fn" rid="table4fn5"><italic>e</italic></xref></sup></td><td align="left" valign="top"><italic>86.4</italic></td><td align="left" valign="top"><italic>90.6</italic></td><td align="left" valign="top"><italic>87.2</italic></td><td align="left" valign="top"><italic>89</italic></td><td align="left" valign="top"><italic>90</italic></td></tr><tr><td align="left" valign="top" colspan="7">Feedforward neural network</td></tr><tr><td align="left" valign="top">&#x2003;VAF</td><td align="left" valign="top">62.4</td><td align="left" valign="top">54.1</td><td align="left" valign="top">57.2</td><td align="left" valign="top">51.6</td><td align="left" valign="top">53.4</td><td align="left" valign="top">51</td></tr><tr><td 
align="left" valign="top">&#x2003;LDA</td><td align="left" valign="top">70.5</td><td align="left" valign="top">66.4</td><td align="left" valign="top">71.4</td><td align="left" valign="top">68.9</td><td align="left" valign="top">72.5</td><td align="left" valign="top">66</td></tr><tr><td align="left" valign="top">&#x2003;t-SNE</td><td align="left" valign="top">72.6</td><td align="left" valign="top">71.3</td><td align="left" valign="top">70.2</td><td align="left" valign="top">69.4</td><td align="left" valign="top">72.8</td><td align="left" valign="top">73</td></tr><tr><td align="left" valign="top">&#x2003;PCA</td><td align="left" valign="top">88.2</td><td align="left" valign="top">85.4</td><td align="left" valign="top">84.6</td><td align="left" valign="top">86.2</td><td align="left" valign="top">83.7</td><td align="left" valign="top">82</td></tr><tr><td align="left" valign="top" colspan="7">Vision transformer</td></tr><tr><td align="left" valign="top">&#x2003;Without data augmentation</td><td align="left" valign="top">60.8</td><td align="left" valign="top">53.1</td><td align="left" valign="top">54.6</td><td align="left" valign="top">56.8</td><td align="left" valign="top">55.6</td><td align="left" valign="top">61</td></tr><tr><td align="left" valign="top">&#x2003;With data augmentation</td><td align="left" valign="top"><italic>93.2</italic></td><td align="left" valign="top"><italic>87.2</italic></td><td align="left" valign="top"><italic>90.5</italic></td><td align="left" valign="top"><italic>87.6</italic></td><td align="left" valign="top"><italic>88.7</italic></td><td align="left" valign="top"><italic>92</italic></td></tr></tbody></table><table-wrap-foot><fn id="table4fn1"><p><sup>a</sup>VAF: voxels-as-features.</p></fn><fn id="table4fn2"><p><sup>b</sup>LDA: linear discriminant analysis.</p></fn><fn id="table4fn3"><p><sup>c</sup>t-SNE: t-distributed stochastic neighbor embedding<italic>.</italic></p></fn><fn id="table4fn4"><p><sup>d</sup>PCA: principal component 
analysis.</p></fn><fn id="table4fn5"><p><sup>e</sup>Italics indicate the best achieved results.</p></fn></table-wrap-foot></table-wrap><table-wrap id="t5" position="float"><label>Table 5.</label><caption><p>Five-fold cross-validation performance for Alzheimer&#x2019;s Disease Neuroimaging Initiative test data in terms of accuracy, sensitivity, specificity, precision, <italic>F</italic><sub>1</sub>-score, and area under the receiver operating characteristic curve (AUROC).</p></caption><table id="table5" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Classifier</td><td align="left" valign="bottom">Accuracy (%)</td><td align="left" valign="bottom">Sensitivity (%)</td><td align="left" valign="bottom">Specificity (%)</td><td align="left" valign="bottom">Precision (%)</td><td align="left" valign="bottom"><italic>F</italic><sub>1</sub>-score (%)</td><td align="left" valign="bottom">AUROC (%)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="7">Support vector machine</td></tr><tr><td align="left" valign="top">&#x2003;VAF<sup><xref ref-type="table-fn" rid="table5fn1">a</xref></sup></td><td align="left" valign="top">42.8</td><td align="left" valign="top">59.2</td><td align="left" valign="top">60.4</td><td align="left" valign="top">63.2</td><td align="left" valign="top">50.1</td><td align="left" valign="top">58</td></tr><tr><td align="left" valign="top">&#x2003;LDA<sup><xref ref-type="table-fn" rid="table5fn2">b</xref></sup></td><td align="left" valign="top">72.1</td><td align="left" valign="top">68.4</td><td align="left" valign="top">67.2</td><td align="left" valign="top">68.4</td><td align="left" valign="top">66.2</td><td align="left" valign="top">70</td></tr><tr><td align="left" valign="top">&#x2003;t-SNE<sup><xref ref-type="table-fn" rid="table5fn3">c</xref></sup></td><td align="left" valign="top">79.3</td><td align="left" valign="top">71.1</td><td align="left" valign="top">70.1</td><td align="left" valign="top">69.2</td><td 
align="left" valign="top">68.3</td><td align="left" valign="top">71</td></tr><tr><td align="left" valign="top">&#x2003;PCA<sup><xref ref-type="table-fn" rid="table5fn4">d</xref></sup></td><td align="left" valign="top"><italic>88.6<sup><xref ref-type="table-fn" rid="table5fn5">e</xref></sup></italic></td><td align="left" valign="top"><italic>84.1</italic></td><td align="left" valign="top"><italic>88.4</italic></td><td align="left" valign="top"><italic>85.1</italic></td><td align="left" valign="top"><italic>87.4</italic></td><td align="left" valign="top"><italic>88</italic></td></tr><tr><td align="left" valign="top" colspan="7">Feedforward neural network</td></tr><tr><td align="left" valign="top">&#x2003;VAF</td><td align="left" valign="top">60.9</td><td align="left" valign="top">51.3</td><td align="left" valign="top">56.4</td><td align="left" valign="top">49.1</td><td align="left" valign="top">51</td><td align="left" valign="top">48</td></tr><tr><td align="left" valign="top">&#x2003;LDA</td><td align="left" valign="top">69.1</td><td align="left" valign="top">62.3</td><td align="left" valign="top">70</td><td align="left" valign="top">65.4</td><td align="left" valign="top">70.1</td><td align="left" valign="top">63</td></tr><tr><td align="left" valign="top">&#x2003;t-SNE</td><td align="left" valign="top">70.4</td><td align="left" valign="top">68.1</td><td align="left" valign="top">68.4</td><td align="left" valign="top">67.1</td><td align="left" valign="top">70.4</td><td align="left" valign="top">70</td></tr><tr><td align="left" valign="top">&#x2003;PCA</td><td align="left" valign="top">80.9</td><td align="left" valign="top">84.1</td><td align="left" valign="top">82.3</td><td align="left" valign="top">84.3</td><td align="left" valign="top">81.4</td><td align="left" valign="top">80</td></tr><tr><td align="left" valign="top" colspan="7">Vision transformer</td></tr><tr><td align="left" valign="top">&#x2003;Without data augmentation</td><td align="left" 
valign="top">59.3</td><td align="left" valign="top">50.2</td><td align="left" valign="top">51.1</td><td align="left" valign="top">54.4</td><td align="left" valign="top">53.4</td><td align="left" valign="top">57</td></tr><tr><td align="left" valign="top">&#x2003;With data augmentation</td><td align="left" valign="top"><italic>90.4</italic></td><td align="left" valign="top"><italic>85.4</italic></td><td align="left" valign="top"><italic>88.6</italic></td><td align="left" valign="top"><italic>86.9</italic></td><td align="left" valign="top"><italic>88</italic></td><td align="left" valign="top"><italic>90</italic></td></tr></tbody></table><table-wrap-foot><fn id="table5fn1"><p><sup>a</sup>VAF: voxels-as-features.</p></fn><fn id="table5fn2"><p><sup>b</sup>LDA: linear discriminant analysis.</p></fn><fn id="table5fn3"><p><sup>c</sup>t-SNE: t-distributed stochastic neighbor embedding.</p></fn><fn id="table5fn4"><p><sup>d</sup>PCA: principal component analysis.</p></fn><fn id="table5fn5"><p><sup>e</sup>Italics indicate the best achieved results.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Main Findings</title><p>The main finding is that the development of diagnostic tools applying the ML approach in conjunction with neuroimaging data could substantially help in automating the classification and prediction of AD.</p><p>In this context, this study proposed a complete CAD system to successfully classify patients with AD and discriminate them from HC patients. The purpose was to examine the association between SVM, FFNN, and ViT ML classifiers; PCA, LDA, and t-SNE dimensionality reduction techniques; and sMRI and FDG-PET neuroimaging modalities to detect early signs of AD. 
Furthermore, we aimed to clarify the impact of some data preprocessing strategies, such as noise reduction and data augmentation, on improving the performance of classifiers.</p><p>With regard to the sMRI and FDG-PET modalities, they can provide large amounts of information; nevertheless, interpreting all image content is challenging for physicians. The experimental analysis demonstrates that combining these neuroimaging modalities with selected ML classifiers enhances their performance, enabling doctors to provide precise diagnosis and timely patient care. This confirms the theory regarding the benefits of these two modalities. Since sMRI provides high-resolution images of brain anatomical structures, which confirm structural change in the brain, it shows shrinkage of brain tissue and abnormalities, while FDG-PET shows the functionality of the brain.</p><p>Regarding the selected dimensional reduction techniques, all of the chosen dimensional reduction techniques performed well as feature extractors when combined with the SVM and FFNN classifiers, but a comparative analysis of the three techniques reveals that PCA outperforms LDA and t-SNE. However, it is important to clarify certain findings: PCA allows the identification of the most significant variables in the data due to its potential to generate new variables, which represent linear combinations of the original variables. Moreover, t-SNE differs from PCA by preserving only small pairwise distances or local similarities, while PCA aims to preserve large pairwise distances to maximize variance. Unlike PCA, LDA is a supervised technique that maximizes class separability in the reduced dimensionality space, thereby retaining the most discriminative features.</p><p>Preliminary results from evaluating the complete CAD system using the three classifiers prove that the system is more effective in separating AD and HC classes. 
The results provided by all the experiments carried out reveal an increase in sensitivity and, consequently, the final accuracy obtained by the basic VAF-SVM model (66.3% for OASIS and 42.8% for ADNI). We compared the performance of the SVM, FFNN, and ViT models using confusion matrix&#x2013;based metrics.</p><p>All models performed well, providing acceptable performance for both databases. Data augmentation/ViT outperformed other models, with accuracies of 93.2% for OASIS and 90.4% for ADNI (see <xref ref-type="table" rid="table4">Tables 4</xref> and <xref ref-type="table" rid="table5">5</xref> for more details on results obtained from all models tested on both databases). The second best classifier is PCA/SVM, achieving an accuracy decrease of 1.3% for OASIS and 1.8% for ADNI, compared to the rates obtained by ViT, resulting in overall accuracy rates of 91.9% and 88.6% for OASIS and ADNI, respectively. Therefore, the data augmentation process and the PCA dimensionality reduction method have the potential to impact the overall performance of the ViT and SVM models, respectively.</p><p>Moreover, compared to the performance using a single MRI modality, all models performed well using a multimodal MRI/PET environment. The best results with MRI were also obtained with ViT and SVM classifiers. Accuracies of 83.9% for the OASIS dataset and 81.2% for ADNI were obtained using the data augmentation/ViT approach. PCA/SVM achieved accuracies of 82.4% for the OASIS and 80.6% for the ADNI datasets. This draws attention to the potential of integrating multiple modalities to increase the performance of the CAD system.</p></sec><sec id="s4-2"><title>Comparison With Prior Work</title><p>To verify the convergence of the proposed CAD system, we compared the results obtained with some relevant state-of-the-art ML models. 
The experimental results show that our models, particularly SVM and ViT, have good performance on both the OASIS and ADNI datasets and achieved better or comparable accuracy to most existing methods in the literature. For the OASIS dataset, the PCA/SVM method had a 91.9% accuracy and the ViT model with data augmentation had a 93.2% accuracy. Nanni et al [<xref ref-type="bibr" rid="ref33">33</xref>], Khan and Zubair [<xref ref-type="bibr" rid="ref16">16</xref>], Sethi et al [<xref ref-type="bibr" rid="ref2">2</xref>], Basheer et al [<xref ref-type="bibr" rid="ref34">34</xref>], Saratxaga et al [<xref ref-type="bibr" rid="ref35">35</xref>], and Liu et al [<xref ref-type="bibr" rid="ref36">36</xref>] got 90.2%, 86.8%, 86.2%, 92.3%, 93%, and 82.6% accuracy, respectively.</p><p>The same finding was obtained for the ADNI dataset, where we achieved an accuracy of 88.6% using the PCA/SVM approach and 90.4% using the ViT model by increasing the data. In contrast, the accuracy achieved by Rallabandi et al [<xref ref-type="bibr" rid="ref37">37</xref>], Jo et al [<xref ref-type="bibr" rid="ref4">4</xref>], Jo et al [<xref ref-type="bibr" rid="ref3">3</xref>], Liu et al [<xref ref-type="bibr" rid="ref36">36</xref>], and Shojaei et al [<xref ref-type="bibr" rid="ref38">38</xref>] was 75%, 75.02%, 80.8%, 90%, and 87%, respectively. 
<xref ref-type="table" rid="table6">Table 6</xref> compares our best results obtained with the prior state-of-the-art models discussed.</p><table-wrap id="t6" position="float"><label>Table 6.</label><caption><p>Comparative study of performance with state-of-the-art machine learning models using the Open Access Series of Imaging Studies (OASIS) and Alzheimer&#x2019;s Disease Neuroimaging Initiative (ADNI) datasets.</p></caption><table id="table6" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Study</td><td align="left" valign="bottom">Approach</td><td align="left" valign="bottom">Dataset</td><td align="left" valign="bottom">Accuracy</td><td align="left" valign="bottom">Sensitivity</td><td align="left" valign="bottom"><italic>F</italic><sub>1</sub>-score</td><td align="left" valign="bottom">AUROC<sup><xref ref-type="table-fn" rid="table6fn1">a</xref></sup></td></tr></thead><tbody><tr><td align="left" valign="top">Liu et al [<xref ref-type="bibr" rid="ref36">36</xref>]</td><td align="left" valign="top">Monte Carlo sampling/ResNet50-CNNs<sup><xref ref-type="table-fn" rid="table6fn2">b</xref></sup>/ensemble classifier</td><td align="left" valign="top">OASIS</td><td align="left" valign="top">82.6</td><td align="left" valign="top">74.3</td><td align="left" valign="top">&#x2014;<sup><xref ref-type="table-fn" rid="table6fn3">c</xref></sup></td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top">Saratxaga et al [<xref ref-type="bibr" rid="ref35">35</xref>]</td><td align="left" valign="top">ResNet18-based CNNs</td><td align="left" valign="top">OASIS</td><td align="left" valign="top">93</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top">Basheer et al [<xref ref-type="bibr" rid="ref34">34</xref>]</td><td align="left" valign="top">PCA<sup><xref ref-type="table-fn" rid="table6fn4">d</xref></sup>/ 
CapsNet-based CNNs</td><td align="left" valign="top">OASIS</td><td align="left" valign="top">92.3</td><td align="left" valign="top">82.3</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top">Nanni et al [<xref ref-type="bibr" rid="ref33">33</xref>]</td><td align="left" valign="top">Ensemble of 5 transfer learning models</td><td align="left" valign="top">OASIS</td><td align="left" valign="top">90.2</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top">Khan and Zubair [<xref ref-type="bibr" rid="ref16">16</xref>]</td><td align="left" valign="top">Chi-square statistical test/RF<sup><xref ref-type="table-fn" rid="table6fn5">e</xref></sup></td><td align="left" valign="top">OASIS</td><td align="left" valign="top">86.8</td><td align="left" valign="top">80</td><td align="left" valign="top">86.4</td><td align="left" valign="top">87.2</td></tr><tr><td align="left" valign="top">Sethi et al [<xref ref-type="bibr" rid="ref2">2</xref>]</td><td align="left" valign="top">CNNs/ SVM<sup><xref ref-type="table-fn" rid="table6fn6">f</xref></sup></td><td align="left" valign="top">OASIS</td><td align="left" valign="top">86.2</td><td align="left" valign="top"><bold>&#x2014;</bold></td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top">Our study</td><td align="left" valign="top">PCA/SVM</td><td align="left" valign="top">OASIS</td><td align="left" valign="top">91.9</td><td align="left" valign="top">86.4</td><td align="left" valign="top">89</td><td align="left" valign="top">90</td></tr><tr><td align="left" valign="top">Our study</td><td align="left" valign="top">Data augmentation/ViT<sup><xref ref-type="table-fn" rid="table6fn7">g</xref></sup></td><td align="left" valign="top">OASIS</td><td align="left" 
valign="top"><italic>93.2<sup><xref ref-type="table-fn" rid="table6fn8">h</xref></sup></italic></td><td align="left" valign="top"><italic>87.2</italic></td><td align="left" valign="top"><italic>88.7</italic></td><td align="left" valign="top"><italic>92</italic></td></tr><tr><td align="left" valign="top">Shojaei et al [<xref ref-type="bibr" rid="ref38">38</xref>]</td><td align="left" valign="top">Genetic algorithm/3D-CNNs</td><td align="left" valign="top">ADNI</td><td align="left" valign="top">87</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top">Liu et al [<xref ref-type="bibr" rid="ref36">36</xref>]</td><td align="left" valign="top">Monte Carlo sampling/ResNet50-CNNs/ensemble classifier</td><td align="left" valign="top">ADNI</td><td align="left" valign="top">90</td><td align="left" valign="top">83.5</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top">Rallabandi et al [<xref ref-type="bibr" rid="ref37">37</xref>]</td><td align="left" valign="top">FreeSurfer/SVM</td><td align="left" valign="top">ADNI</td><td align="left" valign="top">75</td><td align="left" valign="top">75</td><td align="left" valign="top">72</td><td align="left" valign="top">76</td></tr><tr><td align="left" valign="top">Jo et al [<xref ref-type="bibr" rid="ref4">4</xref>]</td><td align="left" valign="top">Sliding Window Association Test/CNNs</td><td align="left" valign="top">ADNI</td><td align="left" valign="top">75</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">82</td></tr><tr><td align="left" valign="top">Jo et al [<xref ref-type="bibr" rid="ref3">3</xref>]</td><td align="left" valign="top">Weighted gene coexpression network analysis/RF</td><td align="left" valign="top">ADNI</td><td align="left" valign="top">80.8</td><td 
align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">80.8</td></tr><tr><td align="left" valign="top">Our study</td><td align="left" valign="top">PCA/SVM</td><td align="left" valign="top">ADNI</td><td align="left" valign="top">88.6</td><td align="left" valign="top">84.1</td><td align="left" valign="top">87.4</td><td align="left" valign="top">88</td></tr><tr><td align="left" valign="top">Our study</td><td align="left" valign="top">Data augmentation/ViT</td><td align="left" valign="top">ADNI</td><td align="left" valign="top"><italic>90.4</italic></td><td align="left" valign="top"><italic>85.4</italic></td><td align="left" valign="top"><italic>88</italic></td><td align="left" valign="top"><italic>90</italic></td></tr></tbody></table><table-wrap-foot><fn id="table6fn1"><p><sup>a</sup>AUROC: area under the receiver operating characteristic curve.</p></fn><fn id="table6fn2"><p><sup>b</sup>CNN: convolutional neural network.</p></fn><fn id="table6fn3"><p><sup>c</sup>Not available.</p></fn><fn id="table6fn4"><p><sup>d</sup>PCA: principal component analysis.</p></fn><fn id="table6fn5"><p><sup>e</sup>RF: random forest.</p></fn><fn id="table6fn6"><p><sup>f</sup>SVM: support vector machine.</p></fn><fn id="table6fn7"><p><sup>g</sup>ViT: vision transformer.</p></fn><fn id="table6fn8"><p><sup>h</sup>Italics indicate the best achieved results.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s4-3"><title>Limitations and Future Directions</title><p>There are several improvements possible for the proposed CAD system. We aim to enhance the system&#x2019;s performance by collaborating with more extensive AD datasets and implementing various types of ANN and ML-based classifiers.</p><p>The PCA used for feature extraction looks for the principal axis direction, which is used to effectively represent the common features of similar samples. 
This is very effective for representing the common features of the same kind of data samples, but it is not suitable for distinguishing different sample classes. Therefore, to achieve the purpose of feature extraction, we need to combine PCA with other feature dimensionality reduction algorithms like uniform manifold approximation and projection.</p></sec></sec></body><back><ack><p>This project was supported by &#x201C;Fonds de recherche du Qu&#x00E9;bec-Nature et Technologies -FRQNT&#x201D; grants under awards B3X &#x00D7; 314498 and B3XR &#x00D7; 358107.</p><p>The author would like to thank &#x201C;Fonds de recherche du Qu&#x00E9;bec-Nature et Technologies -FRQNT&#x201D; for the financial support offered to accomplish this project. Many thanks to the researchers and expert clinicians of the Open Access Series of Imaging Studies and Alzheimer&#x2019;s Disease Neuroimaging Initiative (ADNI) datasets for developing the images used in the preparation of this work. Special thanks to the reviewers and proofreader (Joshua Dykas) of this work.</p><p>Data used in preparation of this article were obtained from the ADNI database. As such, the investigators within the ADNI contributed to the design and implementation of ADNI or provided data but did not participate in the analysis or writing of this report. A complete listing of ADNI investigators can be found at [<xref ref-type="bibr" rid="ref39">39</xref>].</p></ack><notes><sec><title>Data Availability</title><p>This study used two datasets, Open Access Series of Imaging Studies [<xref ref-type="bibr" rid="ref26">26</xref>] and Alzheimer&#x2019;s Disease Neuroimaging Initiative [<xref ref-type="bibr" rid="ref27">27</xref>], which are available in the public domain. 
However, they are subject to restrictions because they were used under permissions for this study and are therefore not publicly available.</p></sec></notes><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AD</term><def><p>Alzheimer disease</p></def></def-item><def-item><term id="abb2">ADNI</term><def><p>Alzheimer&#x2019;s Disease Neuroimaging Initiative</p></def></def-item><def-item><term id="abb3">ANN</term><def><p>artificial neural network</p></def></def-item><def-item><term id="abb4">AUROC</term><def><p>area under the receiver operating characteristic curve</p></def></def-item><def-item><term id="abb5">CAD</term><def><p>computer-aided diagnosis</p></def></def-item><def-item><term id="abb6">FDG</term><def><p>fluorodeoxyglucose</p></def></def-item><def-item><term id="abb7">FFNN</term><def><p>feedforward neural network</p></def></def-item><def-item><term id="abb8">FN</term><def><p>false negative</p></def></def-item><def-item><term id="abb9">FP</term><def><p>false positive</p></def></def-item><def-item><term id="abb10">HC</term><def><p>healthy control</p></def></def-item><def-item><term id="abb11">LDA</term><def><p>linear discriminant analysis</p></def></def-item><def-item><term id="abb12">ML</term><def><p>machine learning</p></def></def-item><def-item><term id="abb13">MRI</term><def><p>magnetic resonance imaging</p></def></def-item><def-item><term id="abb14">N3</term><def><p>nonparametric nonuniform intensity normalization</p></def></def-item><def-item><term id="abb15">OASIS</term><def><p>Open Access Series of Imaging Studies</p></def></def-item><def-item><term id="abb16">PCA</term><def><p>principal component analysis</p></def></def-item><def-item><term id="abb17">PET</term><def><p>positron emission tomography</p></def></def-item><def-item><term id="abb18">sMRI</term><def><p>structural magnetic resonance imaging</p></def></def-item><def-item><term 
id="abb19">SPM</term><def><p>statistical parametric mapping</p></def></def-item><def-item><term id="abb20">SVM</term><def><p>support vector machine</p></def></def-item><def-item><term id="abb21">t-SNE</term><def><p>t-distributed stochastic neighbor embedding</p></def></def-item><def-item><term id="abb22">TN</term><def><p>true negative</p></def></def-item><def-item><term id="abb23">TP</term><def><p>true positive</p></def></def-item><def-item><term id="abb24">VAF</term><def><p>voxels-as-features</p></def></def-item><def-item><term id="abb25">ViT</term><def><p>vision transformer</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Sun</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Nian</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>Self-explainable graph neural network for Alzheimer disease and related dementias risk prediction: algorithm development and validation study</article-title><source>JMIR Aging</source><year>2024</year><month>07</month><day>8</day><volume>7</volume><fpage>e54748</fpage><pub-id pub-id-type="doi">10.2196/54748</pub-id><pub-id pub-id-type="medline">38976869</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sethi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Rani</surname><given-names>S</given-names> </name><name name-style="western"><surname>Singh</surname><given-names>A</given-names> </name><name name-style="western"><surname>Maz&#x00F3;n</surname><given-names>JLV</given-names> </name></person-group><article-title>A CAD system for Alzheimer&#x2019;s disease 
classification using neuroimaging MRI 2D slices</article-title><source>Comput Math Methods Med</source><year>2022</year><month>08</month><day>9</day><volume>2022</volume><fpage>8680737</fpage><pub-id pub-id-type="doi">10.1155/2022/8680737</pub-id><pub-id pub-id-type="medline">35983528</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jo</surname><given-names>T</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>J</given-names> </name><name name-style="western"><surname>Bice</surname><given-names>P</given-names> </name><etal/></person-group><article-title>Circular-SWAT for deep learning based diagnostic classification of Alzheimer&#x2019;s disease: application to metabolome data</article-title><source>EBioMedicine</source><year>2023</year><month>11</month><volume>97</volume><fpage>104820</fpage><pub-id pub-id-type="doi">10.1016/j.ebiom.2023.104820</pub-id><pub-id pub-id-type="medline">37806288</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jo</surname><given-names>T</given-names> </name><name name-style="western"><surname>Nho</surname><given-names>K</given-names> </name><name name-style="western"><surname>Bice</surname><given-names>P</given-names> </name><name name-style="western"><surname>Saykin</surname><given-names>AJ</given-names> </name><collab>Alzheimer&#x2019;s Disease Neuroimaging Initiative</collab></person-group><article-title>Deep learning-based identification of genetic variants: application to Alzheimer&#x2019;s disease classification</article-title><source>Brief Bioinform</source><year>2022</year><month>03</month><day>10</day><volume>23</volume><issue>2</issue><fpage>bbac022</fpage><pub-id pub-id-type="doi">10.1093/bib/bbac022</pub-id><pub-id 
pub-id-type="medline">35183061</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lazli</surname><given-names>L</given-names> </name><name name-style="western"><surname>Boukadoum</surname><given-names>M</given-names> </name><name name-style="western"><surname>Mohamed</surname><given-names>OA</given-names> </name></person-group><article-title>A survey on computer-aided diagnosis of brain disorders through MRI based on machine learning and data mining methodologies with an emphasis on Alzheimer disease diagnosis and the contribution of the multimodal fusion</article-title><source>Appl Sci (Basel)</source><year>2020</year><volume>10</volume><issue>5</issue><fpage>1894</fpage><pub-id pub-id-type="doi">10.3390/app10051894</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lazli</surname><given-names>L</given-names> </name><name name-style="western"><surname>Boukadoum</surname><given-names>M</given-names> </name><name name-style="western"><surname>Ait Mohamed</surname><given-names>O</given-names> </name></person-group><article-title>Computer-aided diagnosis system of Alzheimer&#x2019;s disease based on multimodal fusion: tissue quantification based on the hybrid fuzzy-genetic-possibilistic model and discriminative classification based on the SVDD model</article-title><source>Brain Sci</source><year>2019</year><month>10</month><day>22</day><volume>9</volume><issue>10</issue><fpage>289</fpage><pub-id pub-id-type="doi">10.3390/brainsci9100289</pub-id><pub-id pub-id-type="medline">31652635</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Groppell</surname><given-names>S</given-names> </name><name 
name-style="western"><surname>Soto-Ruiz</surname><given-names>KM</given-names> </name><name name-style="western"><surname>Flores</surname><given-names>B</given-names> </name><etal/></person-group><article-title>A rapid, mobile neurocognitive screening test to aid in identifying cognitive impairment and dementia (BrainCheck): cohort study</article-title><source>JMIR Aging</source><year>2019</year><month>03</month><day>21</day><volume>2</volume><issue>1</issue><fpage>e12615</fpage><pub-id pub-id-type="doi">10.2196/12615</pub-id><pub-id pub-id-type="medline">31518280</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Eke</surname><given-names>CS</given-names> </name><name name-style="western"><surname>Jammeh</surname><given-names>E</given-names> </name><name name-style="western"><surname>Li</surname><given-names>X</given-names> </name><name name-style="western"><surname>Carroll</surname><given-names>CB</given-names> </name><name name-style="western"><surname>Pearson</surname><given-names>S</given-names> </name><name name-style="western"><surname>Ifeachor</surname><given-names>EC</given-names> </name></person-group><article-title>Early detection of Alzheimer&#x2019;s disease with blood plasma proteins using support vector machines</article-title><source>IEEE J Biomed Health Inform</source><year>2021</year><month>01</month><volume>25</volume><issue>1</issue><fpage>218</fpage><lpage>226</lpage><pub-id pub-id-type="doi">10.1109/JBHI.2020.2984355</pub-id><pub-id pub-id-type="medline">32340968</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Cai</surname><given-names>W</given-names> </name><name name-style="western"><surname>Chu</surname><given-names>C</given-names> </name><name 
name-style="western"><surname>Zhu</surname><given-names>X</given-names> </name></person-group><article-title>Intelligent classification of Alzheimer&#x2019;s disease based on support vector machine</article-title><conf-name>3rd International Conference on Applied Mathematics, Modelling and Intelligent Computing (CAMMIC 2023)</conf-name><conf-date>Mar 24-26, 2023</conf-date><conf-loc>Tangshan, China</conf-loc><pub-id pub-id-type="doi">10.1117/12.2686137</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kang</surname><given-names>K</given-names> </name><name name-style="western"><surname>Cai</surname><given-names>J</given-names> </name><name name-style="western"><surname>Song</surname><given-names>X</given-names> </name><name name-style="western"><surname>Zhu</surname><given-names>H</given-names> </name></person-group><article-title>Bayesian hidden Markov models for delineating the pathology of Alzheimer&#x2019;s disease</article-title><source>Stat Methods Med Res</source><year>2019</year><month>07</month><volume>28</volume><issue>7</issue><fpage>2112</fpage><lpage>2124</lpage><pub-id pub-id-type="doi">10.1177/0962280217748675</pub-id><pub-id pub-id-type="medline">29278101</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tahami Monfared</surname><given-names>AA</given-names> </name><name name-style="western"><surname>Fu</surname><given-names>S</given-names> </name><name name-style="western"><surname>Hummel</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Estimating transition probabilities across the Alzheimer&#x2019;s disease continuum using a nationally representative real-world database in the United States</article-title><source>Neurol 
Ther</source><year>2023</year><month>08</month><volume>12</volume><issue>4</issue><fpage>1235</fpage><lpage>1255</lpage><pub-id pub-id-type="doi">10.1007/s40120-023-00498-1</pub-id><pub-id pub-id-type="medline">37256433</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Elgammal</surname><given-names>YM</given-names> </name><name name-style="western"><surname>Zahran</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Abdelsalam</surname><given-names>MM</given-names> </name></person-group><article-title>A new strategy for the early detection of alzheimer disease stages using multifractal geometry analysis based on k-nearest neighbor algorithm</article-title><source>Sci Rep</source><year>2022</year><month>12</month><day>26</day><volume>12</volume><issue>1</issue><fpage>22381</fpage><pub-id pub-id-type="doi">10.1038/s41598-022-26958-6</pub-id><pub-id pub-id-type="medline">36572791</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lu</surname><given-names>D</given-names> </name><name name-style="western"><surname>Yue</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Hu</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Xu</surname><given-names>M</given-names> </name><name name-style="western"><surname>Tong</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Ma</surname><given-names>H</given-names> </name></person-group><article-title>Effective detection of Alzheimer&#x2019;s disease by optimizing fuzzy k-nearest neighbors based on salp swarm algorithm</article-title><source>Comput Biol Med</source><year>2023</year><month>06</month><volume>159</volume><fpage>106930</fpage><pub-id 
pub-id-type="doi">10.1016/j.compbiomed.2023.106930</pub-id><pub-id pub-id-type="medline">37087779</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jha</surname><given-names>D</given-names> </name><name name-style="western"><surname>Alam</surname><given-names>S</given-names> </name><name name-style="western"><surname>Pyun</surname><given-names>JY</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>KH</given-names> </name><name name-style="western"><surname>Kwon</surname><given-names>GR</given-names> </name></person-group><article-title>Alzheimer&#x2019;s disease detection using extreme learning machine, complex dual tree wavelet principal coefficients and linear discriminant analysis</article-title><source>J Med Imaging Health Inform</source><year>2018</year><month>06</month><day>1</day><volume>8</volume><issue>5</issue><fpage>881</fpage><lpage>890</lpage><pub-id pub-id-type="doi">10.1166/jmihi.2018.2381</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lin</surname><given-names>W</given-names> </name><name name-style="western"><surname>Gao</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Du</surname><given-names>M</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>W</given-names> </name><name name-style="western"><surname>Tong</surname><given-names>T</given-names> </name></person-group><article-title>Multiclass diagnosis of stages of Alzheimer&#x2019;s disease using linear discriminant analysis scoring for multimodal data</article-title><source>Comput Biol Med</source><year>2021</year><month>07</month><volume>134</volume><fpage>104478</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2021.104478</pub-id><pub-id 
pub-id-type="medline">34000523</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Khan</surname><given-names>A</given-names> </name><name name-style="western"><surname>Zubair</surname><given-names>S</given-names> </name></person-group><article-title>An improved multi-modal based machine learning approach for the prognosis of Alzheimer&#x2019;s disease</article-title><source>J King Saud Univ Comput Inf Sci</source><year>2022</year><month>06</month><volume>34</volume><issue>6</issue><fpage>2688</fpage><lpage>2706</lpage><pub-id pub-id-type="doi">10.1016/j.jksuci.2020.04.004</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shastry</surname><given-names>KA</given-names> </name><name name-style="western"><surname>Sattar</surname><given-names>SA</given-names> </name></person-group><article-title>Logistic random forest boosting technique for Alzheimer&#x2019;s diagnosis</article-title><source>Int J Inf Technol</source><year>2023</year><volume>15</volume><issue>3</issue><fpage>1719</fpage><lpage>1731</lpage><pub-id pub-id-type="doi">10.1007/s41870-023-01187-w</pub-id><pub-id pub-id-type="medline">37056794</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Costa</surname><given-names>A</given-names> </name><name name-style="western"><surname>Pais</surname><given-names>M</given-names> </name><name name-style="western"><surname>Loureiro</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Decision tree-based classification as a support to diagnosis in the Alzheimer&#x2019;s disease continuum using cerebrospinal fluid biomarkers: insights from automated 
analysis</article-title><source>Braz J Psychiatry</source><year>2022</year><month>08</month><day>30</day><volume>44</volume><issue>4</issue><fpage>370</fpage><lpage>377</lpage><pub-id pub-id-type="doi">10.47626/1516-4446-2021-2277</pub-id><pub-id pub-id-type="medline">35739065</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bhagya Shree</surname><given-names>SR</given-names> </name><name name-style="western"><surname>Sheshadri</surname><given-names>HS</given-names> </name></person-group><article-title>Diagnosis of Alzheimer&#x2019;s disease using naive Bayesian classifier</article-title><source>Neural Comput Applic</source><year>2018</year><month>01</month><volume>29</volume><issue>1</issue><fpage>123</fpage><lpage>132</lpage><pub-id pub-id-type="doi">10.1007/s00521-016-2416-3</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Chandra</surname><given-names>A</given-names> </name><name name-style="western"><surname>Roy</surname><given-names>S</given-names> </name></person-group><article-title>On the detection of alzheimer&#x2019;s disease using na&#x00EF;ve bayes classifier</article-title><conf-name>2023 International Conference on Microwave, Optical, and Communication Engineering (ICMOCE)</conf-name><conf-date>May 26-28, 2023</conf-date><conf-loc>Bhubaneswar, India</conf-loc><pub-id pub-id-type="doi">10.1109/ICMOCE57812.2023.10166516</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Amoroso</surname><given-names>N</given-names> </name><name name-style="western"><surname>Diacono</surname><given-names>D</given-names> </name><name 
name-style="western"><surname>Fanizzi</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Deep learning reveals Alzheimer&#x2019;s disease onset in MCI subjects: results from an international challenge</article-title><source>J Neurosci Methods</source><year>2018</year><month>05</month><day>15</day><volume>302</volume><fpage>3</fpage><lpage>9</lpage><pub-id pub-id-type="doi">10.1016/j.jneumeth.2017.12.011</pub-id><pub-id pub-id-type="medline">29287745</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lella</surname><given-names>E</given-names> </name><name name-style="western"><surname>Lombardi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Amoroso</surname><given-names>N</given-names> </name><etal/></person-group><article-title>Machine learning and DWI brain communicability networks for Alzheimer&#x2019;s disease detection</article-title><source>Appl Sci (Basel)</source><year>2020</year><volume>10</volume><issue>3</issue><fpage>934</fpage><pub-id pub-id-type="doi">10.3390/app10030934</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kotsiantis</surname><given-names>SB</given-names> </name><name name-style="western"><surname>Zaharakis</surname><given-names>ID</given-names> </name><name name-style="western"><surname>Pintelas</surname><given-names>PE</given-names> </name></person-group><article-title>Machine learning: a review of classification and combining techniques</article-title><source>Artif Intell Rev</source><year>2006</year><month>11</month><day>10</day><volume>26</volume><issue>3</issue><fpage>159</fpage><lpage>190</lpage><pub-id pub-id-type="doi">10.1007/s10462-007-9052-3</pub-id></nlm-citation></ref><ref 
id="ref24"><label>24</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Boesch</surname><given-names>G</given-names> </name></person-group><article-title>Vision transformers (ViT) in image recognition</article-title><source>viso.ai</source><year>2023</year><month>11</month><day>25</day><access-date>2025-03-20</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://viso.ai/deep-learning/vision-transformer-vit/">https://viso.ai/deep-learning/vision-transformer-vit/</ext-link></comment></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jolliffe</surname><given-names>IT</given-names> </name><name name-style="western"><surname>Cadima</surname><given-names>J</given-names> </name></person-group><article-title>Principal component analysis: a review and recent developments</article-title><source>Philos Trans A Math Phys Eng Sci</source><year>2016</year><month>04</month><day>13</day><volume>374</volume><issue>2065</issue><fpage>20150202</fpage><pub-id pub-id-type="doi">10.1098/rsta.2015.0202</pub-id><pub-id pub-id-type="medline">26953178</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="web"><article-title>Open Access Series of Imaging Studies (OASIS)</article-title><source>WashU Sites</source><access-date>2021-11-16</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://sites.wustl.edu/oasisbrains/">https://sites.wustl.edu/oasisbrains/</ext-link></comment></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="web"><source>Alzheimer&#x2019;s Disease Neuroimaging Initiative</source><access-date>2021-11-16</access-date><comment><ext-link ext-link-type="uri" xlink:href="http://www.adni-info.org/">http://www.adni-info.org/</ext-link></comment></nlm-citation></ref><ref 
id="ref28"><label>28</label><nlm-citation citation-type="web"><article-title>Terms of use</article-title><source>Alzheimer&#x2019;s Disease Neuroimaging Initiative</source><access-date>2021-11-16</access-date><comment><ext-link ext-link-type="uri" xlink:href="http://adni.loni.usc.edu/terms-of-use/">http://adni.loni.usc.edu/terms-of-use/</ext-link></comment></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="web"><article-title>SPM8</article-title><source>Wellcome Centre for Human Neuroimaging</source><access-date>2022-10-20</access-date><comment><ext-link ext-link-type="uri" xlink:href="https://www.fil.ion.ucl.ac.uk/spm/software/spm8/">https://www.fil.ion.ucl.ac.uk/spm/software/spm8/</ext-link></comment></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="web"><person-group person-group-type="author"><name name-style="western"><surname>Sled</surname><given-names>JG</given-names> </name></person-group><article-title>The MNI_N3 software package</article-title><source>McConnell Brain Imaging Centre</source><year>2004</year><month>03</month><day>15</day><access-date>2022-10-23</access-date><comment><ext-link ext-link-type="uri" xlink:href="http://www.bic.mni.mcgill.ca/software/N3/">http://www.bic.mni.mcgill.ca/software/N3/</ext-link></comment></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Garcea</surname><given-names>F</given-names> </name><name name-style="western"><surname>Serra</surname><given-names>A</given-names> </name><name name-style="western"><surname>Lamberti</surname><given-names>F</given-names> </name><name name-style="western"><surname>Morra</surname><given-names>L</given-names> </name></person-group><article-title>Data augmentation for medical imaging: a systematic literature review</article-title><source>Comput Biol 
Med</source><year>2023</year><month>01</month><volume>152</volume><fpage>106391</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2022.106391</pub-id><pub-id pub-id-type="medline">36549032</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Maaten</surname><given-names>L</given-names> </name><name name-style="western"><surname>Hinton</surname><given-names>G</given-names> </name></person-group><article-title>Visualizing data using t-SNE</article-title><source>J Mach Learn Res</source><year>2008</year><access-date>2024-04-04</access-date><volume>9</volume><fpage>2579</fpage><lpage>2605</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://www.jmlr.org/papers/volume9/vandermaaten08a/vandermaaten08a.pdf">https://www.jmlr.org/papers/volume9/vandermaaten08a/vandermaaten08a.pdf</ext-link></comment></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Nanni</surname><given-names>L</given-names> </name><name name-style="western"><surname>Interlenghi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Brahnam</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Comparison of transfer learning and conventional machine learning applied to structural brain MRI for the early diagnosis and prognosis of Alzheimer&#x2019;s disease</article-title><source>Front Neurol</source><year>2020</year><month>11</month><day>5</day><volume>11</volume><fpage>576194</fpage><pub-id pub-id-type="doi">10.3389/fneur.2020.576194</pub-id><pub-id pub-id-type="medline">33250847</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Basheer</surname><given-names>S</given-names> 
</name><name name-style="western"><surname>Bhatia</surname><given-names>S</given-names> </name><name name-style="western"><surname>Sakri</surname><given-names>SB</given-names> </name></person-group><article-title>Computational modeling of dementia prediction using deep neural network: analysis on OASIS dataset</article-title><source>IEEE Access</source><year>2021</year><month>03</month><day>17</day><volume>9</volume><fpage>42449</fpage><lpage>42462</lpage><pub-id pub-id-type="doi">10.1109/ACCESS.2021.3066213</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Saratxaga</surname><given-names>CL</given-names> </name><name name-style="western"><surname>Moya</surname><given-names>I</given-names> </name><name name-style="western"><surname>Pic&#x00F3;n</surname><given-names>A</given-names> </name><etal/></person-group><article-title>MRI deep learning-based solution for Alzheimer&#x2019;s disease prediction</article-title><source>J Pers Med</source><year>2021</year><month>09</month><day>9</day><volume>11</volume><issue>9</issue><fpage>902</fpage><pub-id pub-id-type="doi">10.3390/jpm11090902</pub-id><pub-id pub-id-type="medline">34575679</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>C</given-names> </name><name name-style="western"><surname>Huang</surname><given-names>F</given-names> </name><name name-style="western"><surname>Qiu</surname><given-names>A</given-names> </name><collab>Alzheimer&#x2019;s Disease Neuroimaging Initiative</collab></person-group><article-title>Monte Carlo ensemble neural network for the diagnosis of Alzheimer&#x2019;s disease</article-title><source>Neural Netw</source><year>2023</year><month>02</month><volume>159</volume><fpage>14</fpage><lpage>24</lpage><pub-id 
pub-id-type="doi">10.1016/j.neunet.2022.10.032</pub-id><pub-id pub-id-type="medline">36525914</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rallabandi</surname><given-names>VPS</given-names> </name><name name-style="western"><surname>Tulpule</surname><given-names>K</given-names> </name><name name-style="western"><surname>Gattu</surname><given-names>M</given-names> </name></person-group><article-title>Automatic classification of cognitively normal, mild cognitive impairment and Alzheimer&#x2019;s disease using structural MRI analysis</article-title><source>Inform Med Unlocked</source><year>2020</year><volume>18</volume><fpage>100305</fpage><pub-id pub-id-type="doi">10.1016/j.imu.2020.100305</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shojaei</surname><given-names>S</given-names> </name><name name-style="western"><surname>Saniee Abadeh</surname><given-names>M</given-names> </name><name name-style="western"><surname>Momeni</surname><given-names>Z</given-names> </name></person-group><article-title>An evolutionary explainable deep learning approach for Alzheimer&#x2019;s MRI classification</article-title><source>Expert Syst Appl</source><year>2023</year><month>06</month><day>15</day><volume>220</volume><fpage>119709</fpage><pub-id pub-id-type="doi">10.1016/j.eswa.2023.119709</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="web"><article-title>Acknowledgement list for ADNI publications</article-title><source>Alzheimer&#x2019;s Disease Neuroimaging Initiative</source><access-date>2025-04-09</access-date><comment><ext-link ext-link-type="uri" 
xlink:href="https://adni.loni.usc.edu/wp-content/uploads/how_to_apply/ADNI_Acknowledgement_List.pdf">https://adni.loni.usc.edu/wp-content/uploads/how_to_apply/ADNI_Acknowledgement_List.pdf</ext-link></comment></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Principal component analysis.</p><media xlink:href="xmed_v6i1e60866_app1.docx" xlink:title="DOCX File, 18 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Support vector machines.</p><media xlink:href="xmed_v6i1e60866_app2.docx" xlink:title="DOCX File, 16 KB"/></supplementary-material><supplementary-material id="app3"><label>Multimedia Appendix 3</label><p>Feedforward neural network.</p><media xlink:href="xmed_v6i1e60866_app3.docx" xlink:title="DOCX File, 18 KB"/></supplementary-material><supplementary-material id="app4"><label>Multimedia Appendix 4</label><p>Vision transformer.</p><media xlink:href="xmed_v6i1e60866_app4.docx" xlink:title="DOCX File, 18 KB"/></supplementary-material></app-group></back></article>