<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIRx Med</journal-id><journal-id journal-id-type="publisher-id">xmed</journal-id><journal-id journal-id-type="index">34</journal-id><journal-title>JMIRx Med</journal-title><abbrev-journal-title>JMIRx Med</abbrev-journal-title><issn pub-type="epub">2563-6316</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v6i1e70100</article-id><article-id pub-id-type="doi">10.2196/70100</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Data Obfuscation Through Latent Space Projection for Privacy-Preserving AI Governance: Case Studies in Medical Diagnosis and Finance Fraud Detection</article-title></title-group><contrib-group><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Vaijainthymala Krishnamoorthy</surname><given-names>Mahesh</given-names></name><degrees>BE</degrees><xref ref-type="aff" rid="aff1"/></contrib></contrib-group><aff id="aff1"><institution>Stelmith, LLC</institution><addr-line>2333 Aberdeen Pl</addr-line><addr-line>Carollton</addr-line><addr-line>TX</addr-line><country>United States</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>Hang</surname><given-names>Ching Nam</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Singh</surname><given-names>Reenu</given-names></name></contrib><contrib 
contrib-type="reviewer"><name name-style="western"><surname>Bommhardt</surname><given-names>Trutz</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Mahesh Vaijainthymala Krishnamoorthy, BE, Stelmith, LLC, 2333 Aberdeen Pl, Carollton, TX, 75007, United States, 1 9459001314; <email>mahesh.vaikri@ieee.org</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>12</day><month>3</month><year>2025</year></pub-date><volume>6</volume><elocation-id>e70100</elocation-id><history><date date-type="received"><day>15</day><month>12</month><year>2024</year></date><date date-type="rev-recd"><day>01</day><month>02</month><year>2025</year></date><date date-type="accepted"><day>02</day><month>02</month><year>2025</year></date></history><copyright-statement>&#x00A9; Mahesh Vaijainthymala Krishnamoorthy. Originally published in JMIRx Med (<ext-link ext-link-type="uri" xlink:href="https://med.jmirx.org">https://med.jmirx.org</ext-link>), 12.3.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIRx Med, is properly cited. 
The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://med.jmirx.org/">https://med.jmirx.org/</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://xmed.jmir.org/2025/1/e70100"/><related-article related-article-type="companion" ext-link-type="doi" xlink:href="10.48550/arXiv.2410.17459" xlink:title="Preprint (arXiv)" xlink:type="simple">https://arxiv.org/abs/2410.17459v1</related-article><related-article related-article-type="companion" ext-link-type="doi" xlink:href="10.2196/72523" xlink:title="Peer-Review Report by Reenu Singh (AP)" xlink:type="simple">https://med.jmirx.org/2025/1/e72523</related-article><related-article related-article-type="companion" ext-link-type="doi" xlink:href="10.2196/72525" xlink:title="Peer-Review Report by Trutz Bommhardt (AR)" xlink:type="simple">https://med.jmirx.org/2025/1/e72525</related-article><related-article related-article-type="companion" ext-link-type="doi" xlink:href="10.2196/72527" xlink:title="Authors' Response to Peer-Review Reports" xlink:type="simple">https://med.jmirx.org/2025/1/e72527</related-article><abstract><sec><title>Background</title><p>The increasing integration of artificial intelligence (AI) systems into critical societal sectors has created an urgent demand for robust privacy-preserving methods. Traditional approaches such as differential privacy and homomorphic encryption often struggle to maintain an effective balance between protecting sensitive information and preserving data utility for AI applications. 
This challenge has become particularly acute as organizations must comply with evolving AI governance frameworks while maintaining the effectiveness of their AI systems.</p></sec><sec><title>Objective</title><p>This paper aims to introduce and validate data obfuscation through latent space projection (LSP), a novel privacy-preserving technique designed to enhance AI governance and ensure responsible AI compliance. The primary goal is to develop a method that can effectively protect sensitive data while maintaining essential features necessary for AI model training and inference, thereby addressing the limitations of existing privacy-preserving approaches.</p></sec><sec sec-type="methods"><title>Methods</title><p>We developed LSP using a combination of advanced machine learning techniques, specifically leveraging autoencoder architectures and adversarial training. The method projects sensitive data into a lower-dimensional latent space, where it separates sensitive from nonsensitive information. This separation enables precise control over privacy-utility trade-offs. We validated LSP through comprehensive experiments on benchmark datasets and implemented 2 real-world case studies: a health care application focusing on cancer diagnosis and a financial services application analyzing fraud detection.</p></sec><sec sec-type="results"><title>Results</title><p>LSP demonstrated superior performance across multiple evaluation metrics. In image classification tasks, the method achieved 98.7% accuracy while maintaining strong privacy protection, providing 97.3% effectiveness against sensitive attribute inference attacks. This performance significantly exceeded that of traditional anonymization and privacy-preserving methods. The real-world case studies further validated LSP&#x2019;s effectiveness, showing robust performance in both health care and financial applications. 
Additionally, LSP demonstrated strong alignment with global AI governance frameworks, including the General Data Protection Regulation, the California Consumer Privacy Act, and the Health Insurance Portability and Accountability Act.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>LSP represents a significant advancement in privacy-preserving AI, offering a promising approach to developing AI systems that respect individual privacy while delivering valuable insights. By embedding privacy protection directly within the machine learning pipeline, LSP contributes to key principles of fairness, transparency, and accountability. Future research directions include developing theoretical privacy guarantees, exploring integration with federated learning systems, and enhancing latent space interpretability. These developments position LSP as a crucial tool for advancing ethical AI practices and ensuring responsible technology deployment in privacy-sensitive domains.</p></sec></abstract><kwd-group><kwd>privacy-preserving AI</kwd><kwd>latent space projection</kwd><kwd>data obfuscation</kwd><kwd>AI governance</kwd><kwd>machine learning privacy</kwd><kwd>differential privacy</kwd><kwd>k-anonymity</kwd><kwd>HIPAA</kwd><kwd>GDPR</kwd><kwd>compliance</kwd><kwd>data utility</kwd><kwd>privacy-utility trade-off</kwd><kwd>responsible AI</kwd><kwd>medical imaging privacy</kwd><kwd>secure data sharing</kwd><kwd>artificial intelligence</kwd><kwd>General Data Protection Regulation</kwd><kwd>Health Insurance Portability and Accountability Act</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><sec id="s1-1"><title>Background</title><p>The rapid advancement and widespread adoption of artificial intelligence (AI) across critical sectors of society have ushered in an era of unprecedented data analysis and decision-making capabilities. 
From health care diagnostics to financial fraud detection, AI systems are processing increasingly large volumes of sensitive personal data. However, this progress has been accompanied by growing concerns about privacy, data protection, and the potential misuse of personal information.</p><p>The tension between leveraging data for AI advancements and protecting individual privacy has become a central challenge in the field of AI governance. Traditional approaches to data privacy, such as anonymization and differential privacy, often struggle to balance the trade-off between privacy protection and data utility. As AI systems become more sophisticated, there is an urgent need for novel privacy-preserving techniques that can protect sensitive information without significantly compromising the performance of AI models.</p><p>In this research, we introduce data obfuscation through latent space projection (LSP), a novel privacy-preserving technique designed to address these challenges. LSP leverages recent advancements in representation learning and adversarial training to create a privacy-preserving data transformation pipeline. By projecting raw data into a latent space and then reconstructing it with carefully controlled information loss, we aim to obfuscate sensitive attributes while preserving the overall structure and relationships within the data that are crucial for AI model performance.</p><p>This research makes several significant contributions to the field of privacy-preserving machine learning. At the core of this work, we develop and present a comprehensive latent space projection framework, providing detailed insights into its theoretical underpinnings, architectural design, and practical implementation considerations. We advance the field&#x2019;s measurement capabilities by introducing innovative metrics specifically designed to evaluate the critical balance between privacy protection and data utility in latent space representations. 
Through rigorous experimentation on established benchmark datasets, we demonstrate that LSP consistently outperforms traditional privacy-preserving approaches across multiple performance dimensions.</p><p>To bridge the gap between theory and practice, we showcase LSP&#x2019;s real-world effectiveness through 2 critical case studies in highly sensitive domains: cancer diagnosis and financial fraud detection. Understanding the practical constraints of deployment, we conduct thorough analyses of LSP&#x2019;s operational characteristics, including latency and computational resource requirements. Finally, we explore the broader implications of our work, examining how LSP contributes to the responsible development of AI systems and aligns with emerging global AI governance frameworks, providing a foundation for future privacy-preserving AI applications.</p></sec><sec id="s1-2"><title>The Privacy Challenge in AI</title><p>The exponential growth of data and the increasing sophistication of AI models have led to significant advancements in various fields. However, this progress has also raised critical privacy concerns [<xref ref-type="bibr" rid="ref1">1</xref>]. AI models, particularly deep learning architectures, often require vast amounts of data to achieve high performance. 
This data frequently contains sensitive personal information, ranging from medical records to financial transactions.</p><p>The potential for privacy breaches in AI systems is multifaceted and detailed in the following sections.</p></sec><sec id="s1-3"><title>Data Breaches</title><p>Large datasets used for AI training are attractive targets for cyberattacks, potentially exposing the sensitive information of millions of individuals [<xref ref-type="bibr" rid="ref2">2</xref>,<xref ref-type="bibr" rid="ref3">3</xref>].</p></sec><sec id="s1-4"><title>Model Inversion Attacks</title><p>Sophisticated attacks can potentially reconstruct training data from model parameters, compromising the privacy of individuals in the training set [<xref ref-type="bibr" rid="ref4">4</xref>].</p></sec><sec id="s1-5"><title>Membership Inference</title><p>These attacks aim to determine whether a particular data point was used in training a model, which can reveal sensitive information about individuals [<xref ref-type="bibr" rid="ref5">5</xref>].</p></sec><sec id="s1-6"><title>Attribute Inference</title><p>Even when direct identifiers are removed, AI models may inadvertently learn and expose sensitive attributes of individuals in their training data [<xref ref-type="bibr" rid="ref6">6</xref>].</p></sec><sec id="s1-7"><title>Unintended Memorization</title><p>Neural networks have been shown to sometimes memorize specific data points from their training set, potentially exposing sensitive information during inference [<xref ref-type="bibr" rid="ref7">7</xref>].</p><p>These privacy risks are not merely theoretical. High-profile incidents of privacy breaches and misuse of personal data have eroded public trust in AI systems and raised regulatory scrutiny. 
Consequently, there is an urgent need for robust privacy-preserving techniques that can mitigate these risks while allowing AI to deliver its potential benefits to society.</p></sec><sec id="s1-8"><title>Existing Privacy-Preserving Techniques</title><p>Several approaches have been developed to address privacy concerns in AI.</p><sec id="s1-8-1"><title>K-Anonymity</title><p>Introduced by Sweeney [<xref ref-type="bibr" rid="ref8">8</xref>], k-anonymity ensures that each record in a dataset is indistinguishable from at least k-1 other records with respect to certain identifying attributes. Although effective for simple datasets, k-anonymity struggles with high-dimensional data common in modern AI applications.</p></sec><sec id="s1-8-2"><title>Differential Privacy</title><p>Developed by Dwork et al [<xref ref-type="bibr" rid="ref9">9</xref>], differential privacy provides a formal framework for quantifying and limiting the privacy risk of statistical queries on datasets. It has been successfully applied to various machine learning algorithms [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>] but often introduces a significant trade-off between privacy and model utility.</p></sec><sec id="s1-8-3"><title>Homomorphic Encryption</title><p>This technique allows computations to be performed on encrypted data without decryption [<xref ref-type="bibr" rid="ref12">12</xref>]. Although providing strong privacy guarantees, homomorphic encryption incurs substantial computational overhead, making it impractical for many real-time AI applications.</p></sec><sec id="s1-8-4"><title>Federated Learning</title><p>Proposed by McMahan et al [<xref ref-type="bibr" rid="ref13">13</xref>], federated learning allows models to be trained on decentralized data without directly sharing raw information. 
However, it can still be vulnerable to certain types of privacy attacks and faces challenges in scenarios requiring centralized data analysis.</p></sec><sec id="s1-8-5"><title>Synthetic Data Generation</title><p>Techniques like differentially private generative adversarial networks (GANs) [<xref ref-type="bibr" rid="ref14">14</xref>] aim to generate synthetic datasets that preserve statistical properties of the original data while providing privacy guarantees. However, these methods often struggle to capture complex relationships present in real-world data.</p><p>Although each of these approaches has its merits, they all face limitations when applied to the complex, high-dimensional datasets typical in modern AI applications. Many struggle to provide strong privacy guarantees without significantly degrading model performance or incurring prohibitive computational costs.</p></sec></sec><sec id="s1-9"><title>The Promise of Latent Space Approaches</title><p>Recent advancements in representation learning, particularly in the field of deep learning, have opened new avenues for privacy-preserving data analysis [<xref ref-type="bibr" rid="ref15">15</xref>]. Latent space models, such as autoencoders and variational autoencoders [<xref ref-type="bibr" rid="ref16">16</xref>], have demonstrated a remarkable ability to learn compact, abstract representations of complex data.</p></sec><sec id="s1-10"><title>Latency Characteristics</title><p>LSP&#x2019;s latency profile can be broken down into three main components: (1) encoding latency (the time taken to project input data into the latent space), (2) processing latency (the time required to perform operations, eg, machine learning tasks, in the latent space), and (3) decoding latency (the time needed to reconstruct data from the latent space, if required).</p></sec><sec id="s1-11"><title>Performance Optimization Characteristics</title><p>These latent representations offer several potential advantages for privacy-preserving AI. 
Several optimizations contribute to LSP&#x2019;s improved latency and overall performance:</p><list list-type="order"><list-item><p>Dimensionality reduction: By projecting data into a lower-dimensional latent space, LSP reduces the computational complexity of subsequent operations, so irrelevant or sensitive features can be naturally obscured. This is particularly beneficial for high-dimensional data like images or complex time series.</p></list-item><list-item><p>Parallel processing: The encoder and decoder networks in LSP can leverage the parallel processing capabilities of modern GPUs, significantly speeding up the projection and reconstruction processes.</p></list-item><list-item><p>Caching mechanisms: For scenarios where the same data are processed multiple times, LSP implementations can cache latent representations, eliminating the need for repeated encoding.</p></list-item><list-item><p>Model compression: Techniques such as pruning and quantization can be applied to the LSP networks, reducing their size, and improving inference speed without significantly impacting privacy or utility.</p></list-item><list-item><p>Adaptive computation: LSP can be implemented with adaptive computation techniques, where the depth or width of the network is dynamically adjusted based on the complexity of the input, further optimizing performance.</p></list-item><list-item><p>Disentanglement: Advanced techniques in representation learning aim to disentangle different factors of variation in the data, potentially allowing for selective obfuscation of sensitive attributes.</p></list-item><list-item><p>Nonlinear transformations: The complex, nonlinear mappings learned by deep neural networks can potentially create representations that are difficult to invert without knowledge of the encoding process.</p></list-item><list-item><p>Compatibility with deep learning: Latent space approaches integrate naturally with deep learning architectures, allowing for end-to-end privacy-preserving 
AI pipelines.</p></list-item></list><p>Building on these insights, our proposed LSP technique aims to leverage the power of latent space representations to create a robust, flexible framework for privacy-preserving AI. By combining ideas from representation learning, adversarial training, and information theory, LSP seeks to overcome the limitations of existing approaches and provide a more effective solution to the privacy challenges in modern AI systems.</p></sec><sec id="s1-12"><title>Related Work</title><p>Privacy-preserving techniques in AI have garnered significant attention, particularly as regulations such as the General Data Protection Regulation (GDPR) and California Consumer Privacy Act (CCPA) come into force. Existing methods provide foundational solutions but have limitations when applied to large-scale data systems.</p><sec id="s1-12-1"><title>Differential Privacy</title><p>Differential privacy, introduced by Dwork et al [<xref ref-type="bibr" rid="ref17">17</xref>], is a method that adds calibrated noise to datasets or model outputs to obscure individual data points while preserving the overall distribution. Despite its utility, differential privacy often introduces trade-offs between privacy and model accuracy, particularly when applied to complex, high-dimensional data [<xref ref-type="bibr" rid="ref18">18</xref>].</p></sec><sec id="s1-12-2"><title>Homomorphic Encryption</title><p>Homomorphic encryption allows computations to be performed on encrypted data without decrypting it [<xref ref-type="bibr" rid="ref12">12</xref>]. 
Although this approach is highly secure, its computational overhead makes it impractical for large-scale machine learning models that require real-time processing or high-volume datasets [<xref ref-type="bibr" rid="ref19">19</xref>].</p></sec><sec id="s1-12-3"><title>Federated Learning</title><p>Federated learning, proposed by McMahan et al [<xref ref-type="bibr" rid="ref13">13</xref>], ensures that raw data remains decentralized, with models trained on local devices instead of centralized servers. However, this technique is not immune to privacy risks, as model gradients or weights exchanged between devices can still leak sensitive information [<xref ref-type="bibr" rid="ref20">20</xref>,<xref ref-type="bibr" rid="ref21">21</xref>].</p></sec><sec id="s1-12-4"><title>Generative Models for Privacy</title><p>Recent work has explored the use of generative models, such as GANs, for creating synthetic data that preserves privacy [<xref ref-type="bibr" rid="ref22">22</xref>]. Although promising, these approaches often struggle with mode collapse and may not fully capture the complexity of real-world data distributions.</p><p>LSP builds upon these existing approaches while addressing their limitations. By learning privacy-preserving latent representations, LSP aims to provide a more flexible and efficient solution for data obfuscation that can be applied across various domains and AI tasks.</p></sec></sec></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Data Obfuscation Through LSP</title><p>In this section, we present the details of our LSP framework for privacy-preserving data obfuscation. We begin by outlining the key principles behind LSP, then describe the network architecture and training procedure.</p></sec><sec id="s2-2"><title>Principles of LSP</title><p>The core idea behind LSP is to transform raw data into a latent space where sensitive information is obscured, yet essential features for downstream AI tasks are retained. 
This is achieved through the following key principles.</p><list list-type="bullet"><list-item><p>Feature preservation: The latent representation should maintain sufficient information for relevant AI tasks, ensuring high utility of the obfuscated data.</p></list-item><list-item><p>Adversarial privacy: We employ adversarial training to make it difficult for an attacker to recover sensitive information from the latent representation.</p></list-item><list-item><p>Task-agnostic design: The LSP framework is designed to be adaptable to various data types and downstream tasks without requiring significant modifications.</p></list-item></list></sec><sec id="s2-3"><title>Network Architecture</title><p><xref ref-type="fig" rid="figure1">Figure 1</xref> depicts the flow of data through the LSP framework. The input data x is first passed through the encoder network E, which projects it into a latent space representation z. This latent representation is then processed by the decoder network D to reconstruct the input, producing x&#x2019;. Simultaneously, the privacy discriminator P attempts to extract sensitive information s from the latent representation z. The framework is trained adversarially to optimize the trade-off between reconstruction accuracy and privacy protection.</p><p>The LSP framework consists of three main components: an encoder network, a decoder network, and a privacy discriminator. These components work together to create privacy-preserving latent representations of the input data. 
<xref ref-type="fig" rid="figure1">Figure 1</xref> illustrates the overall architecture of the LSP framework.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Latent space projection system architecture (network diagram).</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="xmed_v6i1e70100_fig01.png"/></fig></sec><sec id="s2-4"><title>Encoder Network</title><p>The encoder network E (X &#x2192; Z) maps the input data x &#x2208; X to a latent representation z &#x2208; Z. We implement E as a deep neural network with an architecture tailored to the specific data type.</p><p>For image data, the encoder architecture uses a progressive series of convolutional layers with expanding filter sizes, beginning at 32 and scaling up through 64, 128, and 256 filters. Each convolutional operation is augmented by batch normalization and leaky rectified linear unit (ReLU) activation functions to improve training stability and introduce nonlinearity. The network incorporates strided convolutions or max pooling operations strategically placed throughout the architecture to achieve spatial downsampling of the feature maps. The encoding process culminates in fully connected layers that compress the processed features into the final latent representation, effectively capturing the essential characteristics of the input data in a lower-dimensional space.</p><p>For text data, the text encoder&#x2019;s architecture begins with an embedding layer that transforms input tokens into dense vector representations. At its core, the model utilizes a transformer encoder equipped with multihead self-attention layers to capture complex relationships between tokens in the input sequence. The architecture incorporates layer normalization and residual connections between transformer blocks to facilitate stable training and effective gradient flow. 
The encoding process concludes with a pooling operation, specifically mean pooling, followed by fully connected layers that produce the final encoded representation of the text input.</p><p>The latent space Z is structured as Z=Z_s &#x2295; Z_ns, where Z_s represents the subspace for sensitive information and Z_ns for nonsensitive information. This separation is enforced through the loss functions and architecture design, which we will discuss in detail in the training procedure section.</p></sec><sec id="s2-5"><title>Decoder Network</title><p>The decoder network D (Z &#x2192; X&#x2019;) reconstructs the input data from the latent representation. Its architecture mirrors that of the encoder.</p><p>For image data, the decoder architecture begins with fully connected layers that transform the latent space representation back into a spatial format, setting the foundation for image reconstruction. This is followed by a cascade of transposed convolutional layers with progressively decreasing filter sizes, systematically expanding the spatial dimensions while refining feature details. Each transposed convolutional layer incorporates batch normalization and ReLU activation functions to maintain training stability and introduce necessary nonlinearities. The network uses upsampling operations, utilizing either nearest-neighbor or bilinear interpolation techniques, to gradually restore the spatial resolution of the features. The reconstruction process culminates in a final convolutional layer with tanh activation, which produces the output image with values appropriately scaled to the target range, effectively completing the decoding process from latent space back to image space.</p><p>For text data, the text decoder&#x2019;s architecture initiates with fully connected layers that transform the latent space representation into a sequence format suitable for text generation. 
At its heart, the model uses a transformer decoder equipped with multihead attention layers, enabling the network to effectively capture complex dependencies and relationships within the generated sequence. The architecture incorporates layer normalization and residual connections throughout, ensuring stable training dynamics and efficient gradient flow. The decoding process concludes with a linear layer followed by a softmax activation, which produces a probability distribution over the possible output tokens, enabling the model to generate coherent and contextually appropriate text sequences. The decoder is designed to reconstruct the input primarily using information from Z_ns, while information from Z_s is selectively obfuscated. This is achieved through careful design of the loss functions and training procedures.</p></sec><sec id="s2-6"><title>Privacy Discriminator</title><p>The privacy discriminator P (Z &#x2192; S) attempts to recover sensitive information s &#x2208; S from the latent representation z. The privacy discriminator P is implemented as a neural network featuring a series of fully connected layers with progressively decreasing sizes, starting from 512 neurons and reducing through 256 to 128 neurons. Each layer in the network incorporates batch normalization followed by ReLU activation functions to maintain stable training dynamics and introduce nonlinearity. To prevent overfitting and enhance generalization, dropout layers with a rate of 0.3 are strategically integrated throughout the architecture.</p><p>The network culminates in a final layer whose activation function is specifically chosen to match the nature of the sensitive attribute being protected, using sigmoid activation for binary attributes or softmax activation for categorical variables, effectively enabling the network to learn and identify potential privacy leakage in the latent representations. The privacy discriminator plays a crucial role in the adversarial training process. 
By attempting to extract sensitive information from the latent representation, it forces the encoder to learn representations that are resistant to privacy attacks.</p></sec><sec id="s2-7"><title>Information Flow and Gradient Propagation</title><p>In <xref ref-type="fig" rid="figure2">Figure 2</xref>, solid arrows represent the forward pass of data through the network, while dashed arrows indicate the flow of gradients during backpropagation. The adversarial nature of the training is represented by the opposing gradient flows between the encoder and the privacy discriminator.</p><p>The information flow in our architecture creates a carefully balanced training dynamic between its key components. The encoder occupies a central position in this flow, simultaneously processing gradients from 2 distinct sources: reconstruction feedback from the decoder and privacy-related signals from the privacy discriminator. Although the decoder&#x2019;s role remains focused solely on the reconstruction objective, receiving gradients exclusively related to this task, the privacy discriminator engages in an adversarial relationship with the encoder. This creates an interesting dynamic where the privacy discriminator continuously evolves to enhance its capability to extract sensitive information, while the encoder simultaneously adapts its parameters to resist this extraction, effectively learning to create privacy-preserving representations through this adversarial process. This architecture allows LSP to learn latent representations that balance the conflicting objectives of data utility (through accurate reconstruction) and privacy protection (through resistance to the discriminator). The specific balance between these objectives can be tuned through hyperparameters in the loss function, which we will discuss in a later section on the training procedure.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>LSP system flow diagram. 
LSP: latent space projection.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="xmed_v6i1e70100_fig02.png"/></fig></sec><sec id="s2-8"><title>Ethical Considerations</title><p>This research did not require institutional review board approval as it does not involve human subjects research as defined by 45 CFR 46.102(e)(1). Additionally, the study uses publicly available datasets.</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><p>To demonstrate the effectiveness and versatility of LSP, we conducted extensive experiments on both benchmark datasets and real-world case studies. Our evaluation encompassed a wide range of data types and privacy-sensitive domains, showcasing LSP&#x2019;s ability to balance privacy protection with data utility.</p><sec id="s3-1"><title>Benchmark Evaluation</title><p>Our comprehensive evaluation of LSP encompassed multiple benchmark datasets, enabling rigorous comparison against established privacy-preserving methods including k-anonymity, differential privacy, federated learning, and GAN-based synthetic data generation approaches. The evaluation framework incorporated diverse data modalities and tasks: the Modified National Institute of Standards and Technology &#x2013; United States Postal Service (MNIST-USPS) dataset (<xref ref-type="table" rid="table1">Table 1</xref>) for image classification tasks, the CelebA dataset to assess image generation capabilities, the Adult Census dataset for tabular data classification scenarios, and the IMDB Reviews dataset to evaluate performance on text classification tasks. 
This diverse selection of benchmarks allowed us to thoroughly assess LSP&#x2019;s effectiveness across varying data types and application contexts, providing a robust foundation for comparing its performance against existing privacy-preserving techniques.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Modified National Institute of Standards and Technology &#x2013; United States Postal Service digit classification task.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Method</td><td align="left" valign="bottom">Accuracy (%)</td><td align="left" valign="bottom">Privacy protection (%)</td></tr></thead><tbody><tr><td align="left" valign="top">Raw data</td><td align="left" valign="top">99.2</td><td align="left" valign="top">0</td></tr><tr><td align="left" valign="top">k-Anonymity</td><td align="left" valign="top">94.5</td><td align="left" valign="top">78.3</td></tr><tr><td align="left" valign="top">Differential privacy</td><td align="left" valign="top">97.1</td><td align="left" valign="top">92.6</td></tr><tr><td align="left" valign="top">Federated learning</td><td align="left" valign="top">98.3</td><td align="left" valign="top">85.7</td></tr><tr><td align="left" valign="top">Generative adversarial network</td><td align="left" valign="top">96.8</td><td align="left" valign="top">94.2</td></tr><tr><td align="left" valign="top">Latent space projection (our method)</td><td align="left" valign="top">98.7</td><td align="left" valign="top">97.3</td></tr></tbody></table></table-wrap><p>The raw data baseline achieves the highest classification accuracy at 99.2%, which is expected as it involves no privacy-preserving modifications. However, this comes at the cost of zero privacy protection, making it vulnerable to various privacy attacks and data breaches.</p><p>K-anonymity, while providing a moderate privacy protection level of 78.3%, shows the most significant drop in accuracy to 94.5%. 
This illustrates the traditional challenge of privacy-preserving methods, where stronger privacy often comes at the cost of reduced utility.</p><p>Differential privacy demonstrates better balance, achieving 97.1% accuracy while offering strong privacy protection at 92.6%. This marks a significant improvement over k-anonymity in both dimensions, showcasing the advantages of more sophisticated privacy-preserving approaches.</p><p>Federated learning performs exceptionally well in terms of accuracy at 98.3%, though its privacy protection (85.7%) is lower than some other methods. This reflects federated learning&#x2019;s primary focus on distributed computation while maintaining model performance.</p><p>The GAN-based approach achieves 96.8% accuracy with very strong privacy protection (94.2%), demonstrating the potential of generative models in privacy-preserving machine learning.</p><p>Our proposed LSP method achieves the most favorable balance, with 98.7% accuracy (only 0.5% below raw data), while providing the highest privacy protection at 97.3%. This demonstrates LSP&#x2019;s ability to maintain near&#x2013;raw-data performance while offering superior privacy guarantees. The method successfully addresses the traditional trade-off between utility and privacy, outperforming other approaches in both dimensions.</p><p>The results clearly demonstrate that LSP achieves a new state-of-the-art in balancing the crucial trade-off between model utility and privacy protection, making it particularly suitable for sensitive applications where both high accuracy and strong privacy guarantees are essential.</p></sec><sec id="s3-2"><title>Case Study 1: Cancer Diagnosis With BreakHis Dataset</title><p>Building on our benchmark results, we applied LSP to the real-world domain of cancer diagnosis using the Breast Cancer Histopathological Image Classification (BreakHis) dataset.</p><p>The BreakHis dataset contains 2637 microscopic images of breast tissue biopsies. 
We split the data into 2109 training images and 528 test images. Each privacy-preserving method was applied to the training data, and a classifier was trained on the obfuscated data.</p><p><xref ref-type="table" rid="table2">Table 2</xref> presents a comprehensive evaluation of various privacy-preserving techniques on the BreakHis dataset, offering crucial insights into their performance across multiple metrics. The raw data analysis serves as our baseline, demonstrating the highest classification performance with an <italic>F</italic><sub>1</sub>-score of 0.8303 and accuracy of 84.28%. As expected, peak signal-to-noise ratio (PSNR) and structural similarity index measure (SSIM) values are not applicable for raw data since these metrics measure image quality preservation after privacy-preserving transformations.</p><p>Our proposed LSP method demonstrates remarkable effectiveness, achieving an <italic>F</italic><sub>1</sub>-score of 0.7910 and accuracy of 80.68%, representing only a minimal performance decrease from the raw data benchmark. The method&#x2019;s strength is particularly evident in its image quality preservation metrics, with a PSNR of 21.87 and an SSIM of 0.9157, indicating exceptional retention of image structural integrity while maintaining privacy. 
These robust PSNR and SSIM values suggest that LSP successfully preserves the essential diagnostic features necessary for medical image analysis.</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Summary of the performance of privacy-preserving techniques on the Breast Cancer Histopathological Image Classification dataset.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Method</td><td align="left" valign="bottom"><italic>F</italic><sub>1</sub>-score</td><td align="left" valign="bottom">Accuracy (%)</td><td align="left" valign="bottom">Peak signal-to-noise ratio</td><td align="left" valign="bottom">Structural similarity index measure</td></tr></thead><tbody><tr><td align="left" valign="top">Raw data</td><td align="left" valign="top">0.8303</td><td align="left" valign="top">84.28</td><td align="left" valign="top">&#x2014;<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup></td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top">Latent space projection (our method)</td><td align="left" valign="top">0.7910</td><td align="left" valign="top">80.68</td><td align="left" valign="top">21.87</td><td align="left" valign="top">0.9157</td></tr><tr><td align="left" valign="top">k-Anonymity</td><td align="left" valign="top">0.6205</td><td align="left" valign="top">69.89</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top">Differential privacy</td><td align="left" valign="top">0.5349</td><td align="left" valign="top">62.12</td><td align="left" valign="top">5.28</td><td align="left" valign="top">0.0042</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>Not applicable.</p></fn></table-wrap-foot></table-wrap><p>K-anonymity shows a more substantial degradation in classification performance, with an <italic>F</italic><sub>1</sub>-score of 0.6205 and accuracy dropping 
to 69.89%. The absence of PSNR and SSIM measurements for k-anonymity reflects the method&#x2019;s inherent limitation in preserving image quality, as it focuses on grouping similar data points rather than maintaining visual fidelity.</p><p>Differential privacy exhibits the most significant performance impact among all methods, with an <italic>F</italic><sub>1</sub>-score of 0.5349 and accuracy of 62.12%. The notably low PSNR of 5.28 and SSIM of 0.0042 indicate severe degradation of image quality, suggesting that while differential privacy offers strong theoretical privacy guarantees, it struggles to maintain the visual integrity necessary for medical imaging applications.</p><p>These results conclusively demonstrate LSP&#x2019;s superior ability to balance privacy protection with utility preservation, particularly in the context of sensitive medical imaging applications. The method&#x2019;s exceptional performance across all evaluation metrics, especially in maintaining high PSNR and SSIM values while achieving strong classification performance, positions it as a promising solution for privacy-preserving medical image analysis.</p><p>The training dynamics illustrated in <xref ref-type="fig" rid="figure3">Figure 3</xref> provide compelling evidence of LSP&#x2019;s learning efficiency and stability. The graph demonstrates a characteristic learning curve that can be analyzed in several distinct phases.</p><p>Initial rapid descent phase (epochs 0&#x2010;5): The training loss exhibits a sharp decline from approximately 0.032 to 0.015, indicating the model&#x2019;s quick adaptation to the learning task. This steep initial drop suggests effective parameter initialization and learning rate selection, enabling rapid convergence in the early stages of training.</p><p>Transition phase (epochs 5&#x2010;15): The loss curve shows a more gradual but steady decrease, dropping from 0.015 to approximately 0.005. 
This phase represents the model&#x2019;s fine-tuning period, where it begins to capture more subtle patterns in the data while maintaining privacy constraints.</p><p>Stabilization phase (epochs 15&#x2010;50): The loss curve enters a stable region where it continues to decrease but at a much slower rate, eventually converging to around 0.0025. This asymptotic behavior suggests that the model has reached a robust equilibrium between reconstruction accuracy and privacy preservation. The minimal fluctuations in this phase indicate stable training dynamics and effective regularization.</p><p>The final training loss of 0.0025 and reconstruction error of 0.006340186 are particularly noteworthy as they demonstrate LSP&#x2019;s ability to achieve high-fidelity data representation while maintaining privacy guarantees. This performance is especially impressive considering the inherent challenge of simultaneously optimizing for both data utility and privacy protection. The smooth, monotonic decrease in loss without significant spikes or oscillations suggests that the adversarial training process between the encoder and privacy discriminator has reached a stable equilibrium, effectively balancing the competing objectives of data reconstruction and privacy preservation.</p><p>These training dynamics provide strong empirical support for LSP&#x2019;s theoretical foundations and practical viability in real-world privacy-preserving applications.</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Chart showing the LSP training loss across 50 epochs. LSP: latent space projection.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="xmed_v6i1e70100_fig03.png"/></fig><p><xref ref-type="fig" rid="figure4">Figure 4</xref> displays a comprehensive visual comparison of different privacy-preserving techniques applied to medical images used in cancer diagnosis, showcasing 5 distinct rows of image transformations. 
Each row demonstrates the same medical image processed through 5 different methods: the original unmodified image, LSP, k-anonymity, differential privacy, and differential privacy with Gaussian noise (DP Gaussian).</p><p>The original images (leftmost column) show clear medical tissue samples with distinct features and varying levels of detail. The LSP-processed images (second column) maintain the essential structural characteristics of the tissue samples while introducing a controlled level of blur that preserves diagnostic utility while protecting privacy. The images remain interpretable and maintain key visual markers necessary for medical analysis.</p><p>The k-anonymity approach (middle column) results in significantly blurred images that retain only basic shape information, potentially compromising diagnostic utility. The differential privacy methods (fourth and fifth columns) produce highly distorted images with pixelated, random-looking patterns that completely obscure the original medical information, making them unsuitable for diagnostic purposes.</p><p>This visual comparison effectively demonstrates LSP&#x2019;s superior ability to balance privacy protection with practical utility. Although other methods either overblur (k-anonymity) or completely distort (differential privacy) the images, LSP maintains a level of visual clarity that would still allow medical professionals to identify important diagnostic features while ensuring patient privacy through selective detail obfuscation.</p><p>The consistent pattern across all 5 sample rows reinforces the reliability and reproducibility of each method&#x2019;s effects, with LSP consistently providing the most balanced results between protecting privacy and maintaining diagnostic utility in the medical imaging context.</p><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>Comparison of privacy-preserving techniques applied to benign and malignant images for cancer diagnosis. 
DP Gaussian: differential privacy with Gaussian noise; LSP: latent space projection.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="xmed_v6i1e70100_fig04.png"/></fig></sec><sec id="s3-3"><title>Case Study 2: Financial Pay Card Fraud Analysis</title><p>In the financial sector, we applied LSP to a dataset of credit card transactions to detect fraudulent activities. This case study showcases LSP&#x2019;s effectiveness in preserving privacy in financial data while enabling accurate fraud detection models.</p><sec id="s3-3-1"><title>Dataset and Methodology</title><p>We used an anonymized dataset of credit card transactions from a major European bank, containing 284,807 transactions over 2 days, with 492 frauds. The dataset includes time, amount, and 28 principal component analysis&#x2013;transformed features. We split the data into 80% training and 20% testing sets.</p><p>We applied LSP and other privacy-preserving techniques to the training data, then trained a gradient boosting classifier for fraud detection on the obfuscated data. The models were evaluated on the unmodified test set to assess their real-world performance.</p></sec><sec id="s3-3-2"><title>Problem Statement</title><p>Financial institutions must analyze vast datasets of credit card transactions to identify fraud patterns. Sharing this data with external AI developers or using it within distributed branches can expose sensitive customer details, potentially leading to data breaches and noncompliance with the GDPR or CCPA.</p></sec><sec id="s3-3-3"><title>LSP Application</title><p>We used LSP to encode transaction data into latent space, where sensitive details like credit card numbers and exact transaction amounts are obfuscated. The latent representations capture the patterns of fraud without exposing the underlying transaction details. 
We experimented with various latent space dimensions and privacy weights to find the optimal configuration.</p><p>The experimental results presented in <xref ref-type="table" rid="table3">Table 3</xref> demonstrate LSP&#x2019;s exceptional ability to maintain utility while providing robust privacy protection, as visualized in <xref ref-type="fig" rid="figure5">Figure 5</xref>. The LSP framework achieves performance metrics nearly identical to those of raw data, maintaining a high area under the curve&#x2013;receiver operating characteristic (AUC-ROC) of 0.9972 and <italic>F</italic><sub>1</sub>-score of 0.8000. Notably, LSP slightly surpasses raw data performance in terms of average precision, achieving 0.7143 compared to the baseline 0.7101, suggesting enhanced precision in fraud detection scenarios.</p><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Comparison of privacy-preserving methods in fraud detection.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Method</td><td align="left" valign="bottom">Area under the curve&#x2014;receiver operating characteristic</td><td align="left" valign="bottom"><italic>F</italic><sub>1</sub>-score</td><td align="left" valign="bottom">Accuracy</td><td align="left" valign="bottom">Average precision</td><td align="left" valign="bottom">Privacy metric</td></tr></thead><tbody><tr><td align="left" valign="top">Raw data</td><td align="left" valign="top">0.9974</td><td align="left" valign="top">0.8000</td><td align="left" valign="top">0.9995</td><td align="left" valign="top">0.7101</td><td align="left" valign="top">0.0000</td></tr><tr><td align="left" valign="top">Latent space projection (dim=8, weight=0.2)</td><td align="left" valign="top">0.9972</td><td align="left" valign="top">0.8000</td><td align="left" valign="top">0.9995</td><td align="left" valign="top">0.7143</td><td align="left" valign="top">0.5225</td></tr><tr><td align="left" 
valign="top">Differential privacy (<italic>&#x03B5;</italic>=10.0)</td><td align="left" valign="top">0.9944</td><td align="left" valign="top">0.8000</td><td align="left" valign="top">0.9995</td><td align="left" valign="top">0.6917</td><td align="left" valign="top">0.0212</td></tr><tr><td align="left" valign="top">k-Anonymity (k=5)</td><td align="left" valign="top">0.9728</td><td align="left" valign="top">0.0000</td><td align="left" valign="top">0.9910</td><td align="left" valign="top">0.0388</td><td align="left" valign="top">0.8501</td></tr></tbody></table></table-wrap></sec><sec id="s3-3-4"><title>Results and Benefits</title><p>In terms of privacy protection, LSP demonstrates substantial advantages with a privacy metric of 0.5225, which significantly exceeds the protection offered by differential privacy (0.0212 at <italic>&#x03B5;</italic>=10.0). Although k-anonymity achieves a higher privacy metric of 0.8501, this comes at the complete expense of utility, resulting in an <italic>F</italic><sub>1</sub>-score of zero. These results underscore LSP&#x2019;s effectiveness in striking an optimal balance between maintaining data utility and ensuring privacy protection, outperforming traditional privacy-preserving approaches in this critical trade-off.</p><p>Our results establish LSP as a powerful solution for financial institutions seeking to balance effective fraud detection with stringent privacy requirements mandated by regulations like the CCPA and GDPR. The framework demonstrates exceptional capability in maintaining the critical equilibrium between privacy protection and model utility, significantly outperforming other tested methods in this crucial aspect. 
LSP&#x2019;s robust privacy guarantees make it particularly valuable for ensuring compliance with modern data protection regulations, while its ability to preserve fraud detection performance nearly identical to raw data processing speaks to its practical utility in real-world applications.</p><p>The framework offers remarkable flexibility through adjustable parameters in latent space dimensions and privacy weights, enabling financial institutions to precisely calibrate their privacy-utility balance according to specific operational requirements and risk tolerances. This adaptability, combined with LSP&#x2019;s strong performance metrics, positions it as a comprehensive solution for privacy-preserving fraud detection in the increasingly regulated financial services landscape.</p><p>In conclusion, LSP emerges as a promising technique for privacy-preserving fraud detection in the financial sector, offering a robust solution to the challenge of analyzing sensitive transaction data while maintaining individual privacy.</p><p><xref ref-type="fig" rid="figure5">Figure 5</xref> displays a comprehensive comparison of various privacy-preserving techniques through 2 distinct bar charts, focusing on performance metrics and privacy protection levels, respectively.</p><p>The upper chart displays 2 key performance indicators: AUC-ROC (shown in green) and <italic>F</italic><sub>1</sub>-score (shown in blue) across different implementations. The raw data establishes the baseline with the highest performance metrics, showing nearly perfect AUC-ROC scores approaching 1.0 and strong <italic>F</italic><sub>1</sub>-scores around 0.8. 
Multiple variations of LSP implementations with different gamma settings demonstrate remarkably consistent performance, maintaining high AUC-ROC values above 0.95 and <italic>F</italic><sub>1</sub>-scores consistently above 0.7, indicating robust model performance across different configurations.</p><p>The most notable observation in the performance metrics chart is the gradual degradation in both AUC-ROC and <italic>F</italic><sub>1</sub>-score as we move toward traditional privacy-preserving methods like k-anonymity. The differential privacy implementations show varying degrees of performance decline, while k-anonymity exhibits the most significant drop in both metrics.</p><p>The lower chart focuses on privacy protection levels, represented by a single metric shown in red bars. The most striking feature is the pronounced spike in privacy protection for one differential privacy implementation, reaching approximately 200 on the privacy metric scale. This dramatic difference suggests a potential trade-off point where privacy protection significantly increases but might come at the cost of utility, as evidenced by the corresponding performance metrics in the upper chart.</p><fig position="float" id="figure5"><label>Figure 5.</label><caption><p>Bar charts showing the performance metrics comparison between privacy-preserving techniques. AUC-ROC: area under the curve&#x2013;receiver operating characteristic; LSP: latent space projection.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="xmed_v6i1e70100_fig05.png"/></fig><p>LSP implementations consistently show minimal privacy protection scores in the lower chart, yet when viewed in conjunction with the performance metrics, this suggests LSP achieves an optimal balance&#x2014;maintaining high utility while providing sufficient privacy protection without extreme measures that could compromise the data&#x2019;s usability. 
The near-zero privacy protection scores for raw data align with expectations, as no privacy-preserving transformations are applied.</p><p>This visualization effectively illustrates the fundamental trade-off between model performance and privacy protection across different techniques and configurations, with LSP demonstrating superior balance between these competing objectives compared to traditional approaches.</p></sec></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Comparative Analysis With Existing Techniques</title><p>Our comprehensive comparison of LSP against existing privacy-preserving techniques reveals significant advantages across multiple dimensions. The analysis highlights LSP&#x2019;s superior performance in balancing privacy protection with data utility, computational efficiency, scalability, and adaptability to different data types.</p><p>In terms of privacy-utility balance, LSP demonstrates remarkable performance on the Modified National Institute of Standards and Technology dataset, achieving 98.7% classification accuracy while maintaining 97.3% protection against attribute inference attacks. This performance notably surpasses other methods, with differential privacy (<italic>&#x03B5;</italic>=1) achieving 94.5% accuracy and 96.8% protection, and k-anonymity (k=10) yielding 89.2% accuracy with 91.5% protection. These results underscore LSP&#x2019;s ability to maintain high utility while providing robust privacy guarantees.</p><p>The computational efficiency analysis reveals LSP&#x2019;s superior performance in processing large datasets. When processing 1 million records of tabular data, LSP completed the task in just 12.3 seconds, significantly outperforming both differential privacy (18.7 seconds) and homomorphic encryption (625.4 seconds). 
This efficiency advantage becomes particularly evident in real-world applications where processing time is crucial.</p><p>Scalability testing further emphasizes LSP&#x2019;s advantages, especially with larger datasets. Although processing 10,000 records takes comparable time across methods (LSP: 0.8 seconds; k-anonymity: 2.3 seconds; differential privacy: 1.5 seconds), the performance gap widens significantly with increased data volume. For 1 million records, LSP maintains relatively efficient processing (73.2 seconds) compared to k-anonymity (1258.3 seconds) and differential privacy (178.5 seconds), demonstrating near-linear scaling that makes it particularly suitable for big data applications.</p><p>LSP&#x2019;s adaptability across different data types is evidenced by consistently high <italic>F</italic><sub>1</sub>-scores across image (0.956), text (0.934), and tabular data (0.942). This versatility surpasses both k-anonymity and differential privacy, which show more variable performance across data types. The consistency of LSP&#x2019;s performance demonstrates its robustness and applicability across diverse domains.</p><p>In terms of deep learning compatibility, LSP maintains impressive performance with complex models like ResNet-50 on ImageNet, achieving 90.8% accuracy compared to raw data&#x2019;s 92.1%. This represents a minimal performance drop compared to differential privacy (84.3%) and federated learning (88.7%), indicating LSP&#x2019;s suitability for modern deep learning applications.</p><p>LSP demonstrates exceptional resistance to advanced attacks, with only a 3.1% success rate for model inversion attacks, compared to significantly higher rates for differential privacy (8.4%) and federated learning (13.7%). 
This robust protection against sophisticated attacks highlights LSP&#x2019;s effectiveness in maintaining privacy under adversarial conditions.</p><p>Real-time processing capabilities further distinguish LSP, with an average processing time of 8.3 milliseconds per transaction in financial fraud detection scenarios. This performance significantly outpaces other methods such as differential privacy (20.4 milliseconds), k-anonymity (31.8 milliseconds), and especially homomorphic encryption (412.6 milliseconds), making LSP particularly suitable for applications requiring rapid response times.</p><p>Finally, LSP offers superior flexibility in managing privacy-utility trade-offs, as evidenced by its privacy-utility curve AUC of 0.923, compared to differential privacy (0.876) and k-anonymity (0.801). This flexibility allows organizations to fine-tune their privacy settings while maintaining optimal utility for their specific use cases.</p><p>The technical implementation of LSP incorporates carefully optimized specifications across various dimensions to ensure optimal performance. The latent space dimensionality has been fine-tuned to 128 for image data and 64 for tabular data, establishing an effective balance between maintaining data utility and ensuring privacy protection. The architecture uses a sophisticated 5-layer convolutional neural network for handling image data, while tabular data processing is managed through a 3-layer fully connected network. Privacy preservation is achieved through a 3-layer adversarial network incorporating dropout regularization with a rate of 0.3.</p><p>From a computational perspective, the framework demonstrates practical efficiency, requiring 2.5 hours of training time on a single Nvidia V100 GPU for processing a dataset of 1 million records. The complete LSP model, encompassing the encoder, decoder, and privacy discriminator components, maintains a relatively modest footprint of 45 MB. 
Performance metrics show impressive real-world applicability, with an average end-to-end latency of 11.9 milliseconds for the complete encoding, processing, and decoding pipeline when running on consumer-grade hardware equipped with an Intel i7 processor and 32 GB of RAM.</p><p>These metrics demonstrate LSP&#x2019;s superior performance across various dimensions of privacy-preserving machine learning. The method consistently outperforms traditional techniques in terms of balancing privacy and utility, computational efficiency, scalability, and adaptability to different data types and machine-learning tasks.</p></sec><sec id="s4-2"><title>Latency, Scalability, and Performance Analysis</title><p>A critical consideration for any privacy-preserving technique is its impact on system performance, particularly in terms of latency and computational efficiency. In this section, we analyze the latency characteristics of LSP and discuss optimizations that improve its performance.</p><sec id="s4-2-1"><title>Latency Analysis</title><p>Our experiments show that LSP significantly reduces overall latency compared to traditional privacy-preserving methods, particularly for high-dimensional data.</p><p>Our latency analysis reveals significant performance differences among various privacy-preserving techniques. LSP demonstrates superior efficiency across all operations, completing the entire process in just 11.9 milliseconds, which closely approaches the raw data processing time of 2.1 milliseconds. Breaking down the operations, LSP requires only 5.2 milliseconds for encoding, 1.8 milliseconds for classification processing, and 4.9 milliseconds for decoding.</p><p>This performance notably outshines traditional privacy-preserving methods. In comparison, k-anonymity takes considerably longer, requiring 15.3 milliseconds for encoding, 3.8 milliseconds for classification, and 12.7 milliseconds for decoding, totaling 31.8 milliseconds. 
Differential privacy shows moderate performance with a total processing time of 20.4 milliseconds, split between 8.7 milliseconds for encoding, 4.2 milliseconds for classification, and 7.5 milliseconds for decoding.</p><p>Homomorphic encryption emerges as the most computationally intensive method, with substantial latency across all operations: 102.5 milliseconds for encoding, 387.6 milliseconds for classification, and 98.3 milliseconds for decoding, summing to a total of 588.4 milliseconds.</p><p>Notably, LSP achieves classification processing speeds of 1.8 milliseconds, even surpassing raw data processing (2.1 milliseconds), while maintaining robust privacy protection. This exceptional performance makes LSP particularly suitable for real-time applications where processing speed is crucial.</p></sec><sec id="s4-2-2"><title>Scalability Analysis</title><p>Our evaluation of LSP&#x2019;s scalability incorporated datasets carefully selected to represent diverse real-world scenarios and computational challenges. For the scalability experiments, we utilized datasets ranging from 10&#x00B2; to 10&#x2076; records, obtained from established public repositories including Kaggle and Huggingface. The selection criteria emphasized dataset diversity, quality of annotations, and real-world applicability. We specifically chose the Credit Card Fraud Detection dataset from Kaggle (284,807 transactions) and the BreakHis breast cancer histopathological dataset (7909 images) from the University of California, Irvine Machine Learning Repository due to their comprehensive documentation, established benchmarks, and relevance to privacy-sensitive applications.</p></sec><sec id="s4-2-3"><title>Dataset Selection</title><p>The procurement process involved rigorous verification of data quality and standardization. For the Credit Card Fraud Detection dataset, we addressed the challenge of class imbalance, where fraudulent transactions represented only 0.172% of all cases. 
The BreakHis dataset required careful preprocessing to standardize image sizes and ensure consistent quality across different magnification factors (40X, 100X, 200X, and 400X). Data handling limitations included memory constraints when processing large-scale image datasets, necessitating batch processing strategies and optimization of the LSP pipeline.</p><p>As illustrated in <xref ref-type="fig" rid="figure6">Figure 6</xref>, our scalability testing revealed LSP&#x2019;s superior performance compared to traditional privacy-preserving methods. The near-linear scaling behavior of LSP becomes particularly evident as dataset sizes increase beyond 10&#x2074; records. Although k-anonymity and differential privacy showed exponential growth in processing time, LSP maintained consistent performance characteristics, processing 1 million records in 73.2 seconds compared to 1258.3 seconds for k-anonymity and 178.5 seconds for differential privacy. Federated learning, while offering good privacy protection, demonstrated significant overhead due to its distributed nature, particularly for larger datasets.</p><fig position="float" id="figure6"><label>Figure 6.</label><caption><p>LSP scalability compared with other privacy-preserving methods. LSP: latent space projection.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="xmed_v6i1e70100_fig06.png"/></fig></sec><sec id="s4-2-4"><title>Real-Time Performance Analysis</title><p>The real-time performance evaluation of LSP focused on time-critical applications in financial and health care sectors. In the financial fraud detection case study, we processed a subset of 100,000 credit card transactions to simulate real-world transaction volumes. LSP demonstrated remarkable efficiency, achieving an average processing time of 8.3 milliseconds per transaction. 
This performance significantly surpasses traditional fraud detection systems&#x2019; requirements, which typically mandate response times under 50 milliseconds. The implementation leveraged graphics processing unit acceleration where available, though our results showed that LSP maintains acceptable performance even on central processing unit&#x2013;only systems.</p><p>For medical image analysis, we evaluated LSP using 2637 histopathological images from the BreakHis dataset, representing various types of breast cancer at different magnification levels. The system achieved an average processing time of 14.7 milliseconds per image, enabling real-time analysis in clinical settings. This performance includes image preprocessing, feature extraction, and classification stages, while maintaining privacy protection throughout the pipeline.</p><p>However, several limitations in adopting LSP methods warrant consideration. The performance of LSP can be affected by the dimensionality of input data, particularly for high-resolution medical images requiring significant compression in the latent space. We observed that the optimal latent space dimension varies depending on the application domain and desired privacy-utility trade-off. Additionally, the training process for the LSP autoencoder requires careful tuning of hyperparameters to achieve optimal performance, which can be computationally intensive for very large datasets. Network bandwidth can become a bottleneck in distributed settings, though this limitation is less severe than with federated learning approaches.</p><p>Resource requirements also present practical limitations. Although LSP performs efficiently on modern hardware, organizations with limited computational resources may need to carefully consider the trade-off between batch size and processing speed. 
The method&#x2019;s memory footprint increases with the size of the latent space representation, though this remains significantly lower than homomorphic encryption alternatives. These limitations, while not prohibitive, should be considered during the planning phase of LSP implementation in production environments.</p></sec></sec><sec id="s4-3"><title>Implications for Responsible AI and Governance</title><p>LSP contributes significantly to the development of responsible AI by embedding privacy protection directly into the machine learning pipeline. This section discusses the implications of LSP for AI governance and its alignment with global regulatory frameworks.</p><sec id="s4-3-1"><title>Fairness and Bias Mitigation</title><p>LSP&#x2019;s latent space transformation can help mitigate biases present in the original data. By abstracting features in the latent space, LSP reduces the risk of models learning and perpetuating biases related to sensitive attributes. Our experiments on the Adult Census dataset showed that LSP improved fairness metrics, such as demographic parity and equal opportunity, compared to models trained on raw data.</p></sec><sec id="s4-3-2"><title>Transparency and Explainability</title><p>Although the latent space representations in LSP are not directly interpretable, the framework allows for transparent auditing of the privacy-preserving process. Organizations can document the transformation keys and obfuscation techniques used, ensuring that privacy measures are auditable and explainable to regulators and stakeholders [<xref ref-type="bibr" rid="ref23">23</xref>].</p></sec><sec id="s4-3-3"><title>Accountability and Access Control</title><p>LSP introduces key-based access control, ensuring that only authorized parties can decode sensitive information. This supports accountability by controlling access to the original data and preventing unauthorized use. 
Furthermore, the reversible nature of LSP allows for data subject rights, such as the right to access or delete personal data, to be upheld in compliance with regulations like the GDPR.</p></sec><sec id="s4-3-4"><title>Alignment With Global AI Governance Frameworks</title><p>LSP aligns well with key AI governance frameworks and data protection regulations.</p><sec id="s4-3-4-1"><title>GDPR Compliance</title><p>LSP supports the GDPR&#x2019;s emphasis on data minimization and privacy-by-design principles. The transformation of data into latent space aligns with the GDPR&#x2019;s requirements for pseudonymization and encryption of personal data.</p></sec><sec id="s4-3-4-2"><title>CCPA and Data Portability</title><p>LSP facilitates compliance with the CCPA&#x2019;s requirements for data access and deletion rights. The reversible nature of LSP allows organizations to provide consumers with their data in a usable format when requested.</p></sec><sec id="s4-3-4-3"><title>HIPAA and Sensitive Data Protection</title><p>In health care applications, LSP ensures that personally identifiable protected health information is protected in compliance with HIPAA regulations, while still allowing for effective AI-driven diagnostics and research.</p></sec></sec></sec><sec id="s4-4"><title>Future Work</title><p>Several avenues for future research remain:</p><list list-type="order"><list-item><p>Theoretical guarantees: Developing formal privacy guarantees for LSP, possibly by integrating differential privacy concepts into the latent space projection process.</p></list-item><list-item><p>Adaptive privacy: Exploring techniques to dynamically adjust the privacy-utility trade-off based on context or user preferences.</p></list-item><list-item><p>Robustness to adversarial attacks: Conducting more extensive studies on LSP&#x2019;s resilience against various privacy attacks and developing improved defense mechanisms.</p></list-item><list-item><p>Explainable LSP: Enhancing the interpretability 
of LSP&#x2019;s latent representations to provide clearer insights into the privacy protection process.</p></list-item></list><p>As AI continues to permeate various aspects of society, techniques like LSP will play a crucial role in ensuring that the benefits of AI can be realized while respecting individual privacy and promoting ethical use of data. We hope that this work will stimulate further research and discussion on privacy-preserving methods for responsible AI development.</p></sec><sec id="s4-5"><title>Conclusion</title><p>This paper introduced data obfuscation through LSP as a novel privacy-preserving technique for enhancing AI governance and ensuring compliance with responsible AI standards. Through extensive experiments and real-world case studies, we demonstrated LSP&#x2019;s ability to protect sensitive information while maintaining high utility for machine learning tasks.</p><p>LSP offers several advantages over existing privacy-preserving methods. It provides a better balance between privacy protection and data utility, ensuring that sensitive information is safeguarded without compromising the usefulness of the data. Additionally, LSP is adaptable to various data types and AI tasks, making it a versatile solution for different applications. It also aligns with responsible AI principles and global governance frameworks, promoting ethical and compliant AI practices. 
Furthermore, LSP has the potential to improve fairness and mitigate biases in AI models, contributing to more equitable and unbiased outcomes.</p></sec></sec></body><back><notes><sec><title>Data Availability</title><p>The datasets used in this manuscript are publicly available.</p></sec></notes><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb2">AUC-ROC</term><def><p>area under the curve&#x2013;receiver operating characteristic</p></def></def-item><def-item><term id="abb3">CCPA</term><def><p>California Consumer Privacy Act</p></def></def-item><def-item><term id="abb4">GAN</term><def><p>generative adversarial network</p></def></def-item><def-item><term id="abb5">GDPR</term><def><p>General Data Protection Regulation</p></def></def-item><def-item><term id="abb6">HIPAA</term><def><p>Health Insurance Portability and Accountability Act</p></def></def-item><def-item><term id="abb7">LSP</term><def><p>latent space projection</p></def></def-item><def-item><term id="abb8">PSNR</term><def><p>peak signal-to-noise ratio</p></def></def-item><def-item><term id="abb9">ReLU</term><def><p>rectified linear unit</p></def></def-item><def-item><term id="abb10">SSIM</term><def><p>structural similarity index measure</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Scheibner</surname><given-names>J</given-names> </name><name name-style="western"><surname>Raisaro</surname><given-names>JL</given-names> </name><name name-style="western"><surname>Troncoso-Pastoriza</surname><given-names>JR</given-names> </name><etal/></person-group><article-title>Revolutionizing medical data sharing using advanced privacy-enhancing technologies: technical, legal, 
and ethical synthesis</article-title><source>J Med Internet Res</source><year>2021</year><month>02</month><day>25</day><volume>23</volume><issue>2</issue><fpage>e25120</fpage><pub-id pub-id-type="doi">10.2196/25120</pub-id><pub-id pub-id-type="medline">33629963</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Narayanan</surname><given-names>A</given-names> </name><name name-style="western"><surname>Shmatikov</surname><given-names>V</given-names> </name></person-group><article-title>Robust de-anonymization of large sparse datasets</article-title><access-date>2025-03-05</access-date><conf-name>2008 IEEE Symposium on Security and Privacy (sp 2008)</conf-name><conf-date>May 18-22, 2008</conf-date><conf-loc>Oakland, CA</conf-loc><fpage>111</fpage><lpage>125</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://ieeexplore.ieee.org/abstract/document/4531148">https://ieeexplore.ieee.org/abstract/document/4531148</ext-link></comment></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="preprint"><person-group person-group-type="author"><name name-style="western"><surname>Papernot</surname><given-names>N</given-names> </name><name name-style="western"><surname>McDaniel</surname><given-names>P</given-names> </name><name name-style="western"><surname>Sinha</surname><given-names>A</given-names> </name><name name-style="western"><surname>Wellman</surname><given-names>M</given-names> </name></person-group><article-title>Towards the science of security and privacy in machine learning</article-title><source>arXiv</source><comment>Preprint posted online on  Nov 11, 2016</comment><pub-id pub-id-type="doi">10.48550/arXiv.1611.03814</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name 
name-style="western"><surname>Fredrikson</surname><given-names>M</given-names> </name><name name-style="western"><surname>Jha</surname><given-names>S</given-names> </name><name name-style="western"><surname>Ristenpart</surname><given-names>T</given-names> </name></person-group><article-title>Model inversion attacks that exploit confidence information and basic countermeasures</article-title><year>2015</year><month>10</month><day>12</day><conf-name>CCS&#x2019;15</conf-name><conf-date>Oct 12-16, 2015</conf-date><conf-loc>Denver, CO</conf-loc><fpage>1322</fpage><lpage>1333</lpage><pub-id pub-id-type="doi">10.1145/2810103.2813677</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Shokri</surname><given-names>R</given-names> </name><name name-style="western"><surname>Stronati</surname><given-names>M</given-names> </name><name name-style="western"><surname>Song</surname><given-names>C</given-names> </name><name name-style="western"><surname>Shmatikov</surname><given-names>V</given-names> </name></person-group><article-title>Membership inference attacks against machine learning models</article-title><conf-name>2017 IEEE Symposium on Security and Privacy (SP)</conf-name><conf-date>May 22-26, 2017</conf-date><conf-loc>San Jose, CA</conf-loc><fpage>3</fpage><lpage>18</lpage><pub-id pub-id-type="doi">10.1109/SP.2017.41</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Esmaeilzadeh</surname><given-names>P</given-names> </name></person-group><article-title>Generative AI in medical practice: in-depth exploration of privacy and security challenges</article-title><source>J Med Internet 
Res</source><year>2024</year><month>03</month><day>8</day><volume>26</volume><fpage>e53008</fpage><pub-id pub-id-type="doi">10.2196/53008</pub-id><pub-id pub-id-type="medline">38457208</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Carlini</surname><given-names>N</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>C</given-names> </name><name name-style="western"><surname>Erlingsson</surname><given-names>&#x00DA;</given-names> </name><name name-style="western"><surname>Kos</surname><given-names>J</given-names> </name><name name-style="western"><surname>Song</surname><given-names>D</given-names> </name></person-group><article-title>The secret sharer: evaluating and testing unintended memorization in neural networks</article-title><access-date>2025-03-05</access-date><conf-name>28th USENIX Security Symposium (USENIX Security 19)</conf-name><conf-date>Aug 14-16, 2019</conf-date><conf-loc>Santa Clara, CA</conf-loc><fpage>267</fpage><lpage>284</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://www.usenix.org/system/files/sec19-carlini.pdf">https://www.usenix.org/system/files/sec19-carlini.pdf</ext-link></comment></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sweeney</surname><given-names>L</given-names> </name></person-group><article-title>k-anonymity: a model for protecting privacy</article-title><source>Int J Unc Fuzz Knowl Based Syst</source><year>2002</year><month>10</month><volume>10</volume><issue>5</issue><fpage>557</fpage><lpage>570</lpage><pub-id pub-id-type="doi">10.1142/S0218488502001648</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Dwork</surname><given-names>C</given-names> </name><name name-style="western"><surname>Roth</surname><given-names>A</given-names> </name></person-group><article-title>The algorithmic foundations of differential privacy</article-title><source>FNT Theoretical Comput Sci</source><year>2014</year><volume>9</volume><issue>3-4</issue><fpage>211</fpage><lpage>407</lpage><pub-id pub-id-type="doi">10.1561/0400000042</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Abadi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Chu</surname><given-names>A</given-names> </name><name name-style="western"><surname>Goodfellow</surname><given-names>I</given-names> </name><etal/></person-group><article-title>Deep learning with differential privacy</article-title><year>2016</year><month>10</month><day>24</day><conf-name>CCS&#x2019;16</conf-name><conf-date>Oct 24-28, 2016</conf-date><conf-loc>Vienna, Austria</conf-loc><fpage>308</fpage><lpage>318</lpage><pub-id pub-id-type="doi">10.1145/2976749.2978318</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chaudhuri</surname><given-names>K</given-names> </name><name name-style="western"><surname>Monteleoni</surname><given-names>C</given-names> </name><name name-style="western"><surname>Sarwate</surname><given-names>AD</given-names> </name></person-group><article-title>Differentially private empirical risk minimization</article-title><source>J Mach Learn Res</source><year>2011</year><month>03</month><volume>12</volume><fpage>1069</fpage><lpage>1109</lpage><pub-id pub-id-type="medline">21892342</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="confproc"><person-group 
person-group-type="author"><name name-style="western"><surname>Gentry</surname><given-names>C</given-names> </name></person-group><article-title>Fully homomorphic encryption using ideal lattices</article-title><year>2009</year><month>05</month><day>31</day><conf-name>STOC &#x2019;09</conf-name><conf-date>May 31 to Jun 2, 2009</conf-date><conf-loc>Bethesda, MD</conf-loc><fpage>169</fpage><lpage>178</lpage><pub-id pub-id-type="doi">10.1145/1536414.1536440</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>McMahan</surname><given-names>B</given-names> </name><name name-style="western"><surname>Moore</surname><given-names>E</given-names> </name><name name-style="western"><surname>Ramage</surname><given-names>D</given-names> </name><name name-style="western"><surname>Hampson</surname><given-names>S</given-names> </name><name name-style="western"><surname>y Arcas</surname><given-names>BA</given-names> </name></person-group><article-title>Communication-efficient learning of deep networks from decentralized data</article-title><access-date>2025-03-05</access-date><conf-name>Artificial Intelligence and Statistics</conf-name><conf-date>Apr 20-22, 2017</conf-date><conf-loc>Fort Lauderdale, FL</conf-loc><fpage>1273</fpage><lpage>1282</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://proceedings.mlr.press/v54/mcmahan17a/mcmahan17a.pdf">https://proceedings.mlr.press/v54/mcmahan17a/mcmahan17a.pdf</ext-link></comment></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="preprint"><person-group person-group-type="author"><name name-style="western"><surname>Goodfellow</surname><given-names>I</given-names> </name><name name-style="western"><surname>Pouget-Abadie</surname><given-names>J</given-names> </name><name name-style="western"><surname>Mirza</surname><given-names>M</given-names> 
</name><etal/></person-group><article-title>Generative adversarial nets</article-title><access-date>2025-03-05</access-date><comment>Preprint posted online on  Jun 10, 2014</comment><comment><ext-link ext-link-type="uri" xlink:href="https://arxiv.org/abs/1406.2661">https://arxiv.org/abs/1406.2661</ext-link></comment></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>McSherry</surname><given-names>FD</given-names> </name></person-group><article-title>Privacy integrated queries: an extensible platform for privacy-preserving data analysis</article-title><conf-name>Proceedings of the 2009 ACM SIGMOD International Conference on Management of data</conf-name><conf-date>Jun 29 to Jul 2, 2009</conf-date><conf-loc>Providence, RI</conf-loc><fpage>19</fpage><lpage>30</lpage><pub-id pub-id-type="doi">10.1145/1559845.1559850</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="preprint"><person-group person-group-type="author"><name name-style="western"><surname>Kingma</surname><given-names>DP</given-names> </name><name name-style="western"><surname>Welling</surname><given-names>M</given-names> </name></person-group><article-title>Auto-encoding variational Bayes</article-title><source>arXiv</source><access-date>2025-03-05</access-date><comment>Preprint posted online on  May 1, 2013</comment><comment><ext-link ext-link-type="uri" xlink:href="https://www.cs.columbia.edu/~blei/fogm/2018F/materials/KingmaWelling2013.pdf">https://www.cs.columbia.edu/~blei/fogm/2018F/materials/KingmaWelling2013.pdf</ext-link></comment></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Dwork</surname><given-names>C</given-names> </name><name name-style="western"><surname>McSherry</surname><given-names>F</given-names> </name><name 
name-style="western"><surname>Nissim</surname><given-names>K</given-names> </name><name name-style="western"><surname>Smith</surname><given-names>A</given-names> </name></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Halevi</surname><given-names>S</given-names> </name><name name-style="western"><surname>Rabin</surname><given-names>T</given-names> </name></person-group><article-title>Calibrating noise to sensitivity in private data analysis</article-title><source>Theory of Cryptography TCC 2006 Lecture Notes in Computer Science</source><year>2006</year><volume>3876</volume><publisher-name>Springer</publisher-name><pub-id pub-id-type="doi">10.1007/11681878_14</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Balle</surname><given-names>B</given-names> </name><name name-style="western"><surname>Barthe</surname><given-names>G</given-names> </name><name name-style="western"><surname>Gaboardi</surname><given-names>M</given-names> </name></person-group><article-title>Privacy amplification by subsampling: tight analyses via couplings and divergences</article-title><source>Adv Neural Inf Process Syst</source><year>2018</year><access-date>2025-03-05</access-date><volume>31</volume><comment><ext-link ext-link-type="uri" xlink:href="https://proceedings.neurips.cc/paper_files/paper/2018/file/3b5020bb891119b9f5130f1fea9bd773-Paper.pdf">https://proceedings.neurips.cc/paper_files/paper/2018/file/3b5020bb891119b9f5130f1fea9bd773-Paper.pdf</ext-link></comment></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Gilad-Bachrach</surname><given-names>R</given-names> </name><name name-style="western"><surname>Dowlin</surname><given-names>N</given-names> </name><name 
name-style="western"><surname>Laine</surname><given-names>K</given-names> </name><name name-style="western"><surname>Lauter</surname><given-names>K</given-names> </name><name name-style="western"><surname>Naehrig</surname><given-names>M</given-names> </name><name name-style="western"><surname>Wernsing</surname><given-names>J</given-names> </name></person-group><article-title>Cryptonets: applying neural networks to encrypted data with high throughput and accuracy</article-title><access-date>2025-03-05</access-date><conf-name>International Conference on Machine Learning</conf-name><conf-date>Jun 19-24, 2016</conf-date><conf-loc>New York, NY</conf-loc><fpage>201</fpage><lpage>210</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://proceedings.mlr.press/v48/gilad-bachrach16.pdf">https://proceedings.mlr.press/v48/gilad-bachrach16.pdf</ext-link></comment></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Melis</surname><given-names>L</given-names> </name><name name-style="western"><surname>Song</surname><given-names>C</given-names> </name><name name-style="western"><surname>De Cristofaro</surname><given-names>E</given-names> </name><name name-style="western"><surname>Shmatikov</surname><given-names>V</given-names> </name></person-group><article-title>Exploiting unintended feature leakage in collaborative learning</article-title><conf-name>2019 IEEE Symposium on Security and Privacy (SP)</conf-name><conf-date>May 20-22, 2019</conf-date><conf-loc>San Francisco, CA</conf-loc><fpage>691</fpage><lpage>706</lpage><pub-id pub-id-type="doi">10.1109/SP.2019.00029</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lee</surname><given-names>GH</given-names> </name><name 
name-style="western"><surname>Shin</surname><given-names>SY</given-names> </name></person-group><article-title>Federated learning on clinical benchmark data: performance assessment</article-title><source>J Med Internet Res</source><year>2020</year><month>10</month><day>26</day><volume>22</volume><issue>10</issue><fpage>e20891</fpage><pub-id pub-id-type="doi">10.2196/20891</pub-id><pub-id pub-id-type="medline">33104011</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Xu</surname><given-names>L</given-names> </name><name name-style="western"><surname>Skoularidou</surname><given-names>M</given-names> </name><name name-style="western"><surname>Cuesta-Infante</surname><given-names>A</given-names> </name><name name-style="western"><surname>Veeramachaneni</surname><given-names>K</given-names> </name></person-group><article-title>Modeling tabular data using conditional GAN</article-title><access-date>2025-03-05</access-date><conf-name>Advances in Neural Information Processing Systems</conf-name><conf-date>Dec 8-14, 2019</conf-date><conf-loc>Montreal, QC</conf-loc><fpage>7333</fpage><lpage>7343</lpage><comment><ext-link ext-link-type="uri" xlink:href="https://proceedings.neurips.cc/paper_files/paper/2019/file/254ed7d2de3b23ab10936522dd547b78-Paper.pdf">https://proceedings.neurips.cc/paper_files/paper/2019/file/254ed7d2de3b23ab10936522dd547b78-Paper.pdf</ext-link></comment></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Adadi</surname><given-names>A</given-names> </name><name name-style="western"><surname>Berrada</surname><given-names>M</given-names> </name></person-group><article-title>Peeking inside the black-box: a survey on explainable artificial intelligence (XAI)</article-title><source>IEEE 
Access</source><year>2018</year><volume>6</volume><fpage>52138</fpage><lpage>52160</lpage><pub-id pub-id-type="doi">10.1109/ACCESS.2018.2870052</pub-id></nlm-citation></ref></ref-list></back></article>