|
{ |
|
"paper_id": "2022", |
|
"header": { |
|
"generated_with": "S2ORC 1.0.0", |
|
"date_generated": "2023-01-19T09:40:13.249137Z" |
|
}, |
|
"title": "Adaptive Differential Privacy for Language Modeling", |
|
"authors": [ |
|
{ |
|
"first": "Xinwei", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tianjin University", |
|
"location": { |
|
"settlement": "Tianjin", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Gong", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "ByteDance Lark AI", |
|
"location": { |
|
"settlement": "Beijing", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "" |
|
}, |
|
{ |
|
"first": "Deyi", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "", |
|
"affiliation": { |
|
"laboratory": "", |
|
"institution": "Tianjin University", |
|
"location": { |
|
"settlement": "Tianjin", |
|
"country": "China" |
|
} |
|
}, |
|
"email": "[email protected]" |
|
} |
|
], |
|
"year": "", |
|
"venue": null, |
|
"identifiers": {}, |
|
"abstract": "Although differential privacy (DP) can protect language models from leaking privacy, its indiscriminative protection on all data points reduces its practical utility. Previous works improve DP training by discriminating private and non-private data. But these works rely on datasets with prior privacy information, which is not available in real-world scenarios. In this paper, we propose an Adaptive Differential Privacy (ADP) framework for language modeling without resorting to prior privacy information. We estimate the probability that a linguistic item contains privacy based on a language model. We further propose a new Adam algorithm that adjusts the degree of differential privacy noise injected to the language model according to the estimated privacy probabilities. Experiments demonstrate that our ADP improves differentially private language modeling to achieve good protection from canary attackers.", |
|
"pdf_parse": { |
|
"paper_id": "2022", |
|
"_pdf_hash": "", |
|
"abstract": [ |
|
{ |
|
"text": "Although differential privacy (DP) can protect language models from leaking privacy, its indiscriminative protection on all data points reduces its practical utility. Previous works improve DP training by discriminating private and non-private data. But these works rely on datasets with prior privacy information, which is not available in real-world scenarios. In this paper, we propose an Adaptive Differential Privacy (ADP) framework for language modeling without resorting to prior privacy information. We estimate the probability that a linguistic item contains privacy based on a language model. We further propose a new Adam algorithm that adjusts the degree of differential privacy noise injected to the language model according to the estimated privacy probabilities. Experiments demonstrate that our ADP improves differentially private language modeling to achieve good protection from canary attackers.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Abstract", |
|
"sec_num": null |
|
} |
|
], |
|
"body_text": [ |
|
{ |
|
"text": "Language modeling is a foundation problem in natural language processing (Bommasani et al., 2021) . Recent large language models (Brown et al., 2020; Zeng et al., 2021) are usually trained at scale. Unfortunately, large language models have a tendency to remember training data in the absence of appropriate privacy protection mechanisms (Carlini et al., 2019 (Carlini et al., , 2021 . Since data, which are usually collected from public sources, e.g., tweets, blogs, may contain sensitive information (personal address, SSN numbers, and so on) learning a safe large language model has become increasingly important.", |
|
"cite_spans": [ |
|
{ |
|
"start": 73, |
|
"end": 97, |
|
"text": "(Bommasani et al., 2021)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 129, |
|
"end": 149, |
|
"text": "(Brown et al., 2020;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 150, |
|
"end": 168, |
|
"text": "Zeng et al., 2021)", |
|
"ref_id": "BIBREF21" |
|
}, |
|
{ |
|
"start": 338, |
|
"end": 359, |
|
"text": "(Carlini et al., 2019", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 360, |
|
"end": 383, |
|
"text": "(Carlini et al., , 2021", |
|
"ref_id": "BIBREF6" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In recent years, differential privacy (Dwork, 2008; Dwork et al., 2014) has become a key privacy preservation method, which attempts to ran-domize the training algorithm so that the model does not rely too much on any single training instances. Abadi et al. (2016) propose Differential Private Stochastic Gradient Descent (DP-SGD) to protect deep learning models by adding random noise to gradients. However, traditional differential privacy ignores individual attributes of data (McMahan et al., 2018) . This overly pessimistic privacy protection results in poor performance or even mis-convergence of training for differentially private language models (Anil et al., 2021) . Therefore, approaches are proposed to mitigate this problem by treating private and non-private data separately during the DP training process, such as selective differential privacy (Shi et al., 2021) and sensory-based privacy-\u03c7 (Qu et al., 2021) . These methods require training data to provide privacy information as a hard label. Unfortunately, it is usually difficult and expensive to manually annotate privacy labels to data. Other studies (Xu et al., 2019; Tesfay et al., 2019) learn to detect privacy information in unstructured texts. However, the prerequisite is knowing keywords or reference texts of privacy information (Neerbek, 2020) . Therefore, learning differentially private language models on data without prior privacy information is an open problem yet to be investigated.", |
|
"cite_spans": [ |
|
{ |
|
"start": 38, |
|
"end": 51, |
|
"text": "(Dwork, 2008;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 52, |
|
"end": 71, |
|
"text": "Dwork et al., 2014)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 245, |
|
"end": 264, |
|
"text": "Abadi et al. (2016)", |
|
"ref_id": "BIBREF0" |
|
}, |
|
{ |
|
"start": 480, |
|
"end": 502, |
|
"text": "(McMahan et al., 2018)", |
|
"ref_id": "BIBREF13" |
|
}, |
|
{ |
|
"start": 655, |
|
"end": 674, |
|
"text": "(Anil et al., 2021)", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 860, |
|
"end": 878, |
|
"text": "(Shi et al., 2021)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 907, |
|
"end": 924, |
|
"text": "(Qu et al., 2021)", |
|
"ref_id": "BIBREF16" |
|
}, |
|
{ |
|
"start": 1123, |
|
"end": 1140, |
|
"text": "(Xu et al., 2019;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 1141, |
|
"end": 1161, |
|
"text": "Tesfay et al., 2019)", |
|
"ref_id": "BIBREF19" |
|
}, |
|
{ |
|
"start": 1309, |
|
"end": 1324, |
|
"text": "(Neerbek, 2020)", |
|
"ref_id": "BIBREF15" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "In this paper, we propose an Adaptive Differential Privacy (ADP) framework without resorting to prior privacy information. The basic assumption behind ADP is that linguistic items containing private information do not occur frequently in realworld texts. Hence, the probability that a linguistic item contains privacy information (hereinafter privacy probability) is inversely proportional to the frequency of the linguistic item occurring in the dataset. With this assumption, we can estimate the privacy probability of a linguistic item based on a language model. After estimating these probabilities, we relax the constraint of differential privacy, and propose an adaptive differential privacy method, which adjusts the Guassian noise of differential privacy based on privacy probabilities. To enable this adaptive differential privacy strategy, we further present Adaptive-DP-Adam Algorithm to train differentially private language models.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "To evaluate our approach, we train transformerbased language models, and compare the performance of adaptive differential privacy against traditional differential privacy methods. Additionally, we verify the protection effectiveness of ADP models with canary attackers (Carlini et al., 2019) . The results suggest that our adaptive differential privacy method can achieve good performance and protection from canary attackers.", |
|
"cite_spans": [ |
|
{ |
|
"start": 269, |
|
"end": 291, |
|
"text": "(Carlini et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "The main contributions of this paper are threefold.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We propose a method to automatically estimate the probability that a linguistic item contains privacy information, relaxing the requirement of prior privacy information of previous methods.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 A new Adaptive-DP-Adam algorithm is proposed, which adaptively adjusts the magnitude of differential privacy noise to be injected into language models according to privacy probabilities. 1", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "\u2022 We conduct experiments to validate the effectiveness of the proposed adaptive differential privacy in improving the performance of differentially private models and protecting sensitive information.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Introduction", |
|
"sec_num": "1" |
|
}, |
|
{ |
|
"text": "Large language models (Brown et al., 2020; Zhang et al., 2020) have been attracting growing attention. Powerful large language models can achieve substantial improvements on a wide range of downstream NLP tasks. Unfortunately, large language models have a tendency to memorize training data (Carlini et al., 2019) . Carlini et al. (2021) have successfully induced GPT-2 (Radford et al., 2019) to output sensitive information in its training data. Differential privacy (Dwork, 2008; Dwork et al., 2014) is widely used to protect private information of data. Abadi et al. (2016) propose the DP-SGD algorithm to train deep learning models, and apply moment accounting to calculate cumulative privacy loss during training. Although DP-SGD can limit the risk of leaking information from training data, random noise on gradients usually degrades corresponding models , and even cause training to not converge when a large model is trained.", |
|
"cite_spans": [ |
|
{ |
|
"start": 22, |
|
"end": 42, |
|
"text": "(Brown et al., 2020;", |
|
"ref_id": "BIBREF4" |
|
}, |
|
{ |
|
"start": 43, |
|
"end": 62, |
|
"text": "Zhang et al., 2020)", |
|
"ref_id": "BIBREF22" |
|
}, |
|
{ |
|
"start": 291, |
|
"end": 313, |
|
"text": "(Carlini et al., 2019)", |
|
"ref_id": "BIBREF5" |
|
}, |
|
{ |
|
"start": 316, |
|
"end": 337, |
|
"text": "Carlini et al. (2021)", |
|
"ref_id": "BIBREF6" |
|
}, |
|
{ |
|
"start": 370, |
|
"end": 392, |
|
"text": "(Radford et al., 2019)", |
|
"ref_id": "BIBREF17" |
|
}, |
|
{ |
|
"start": 468, |
|
"end": 481, |
|
"text": "(Dwork, 2008;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 482, |
|
"end": 501, |
|
"text": "Dwork et al., 2014)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 557, |
|
"end": 576, |
|
"text": "Abadi et al. (2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "To improve DP-SGD, one way is to change training settings Hoory et al., 2021) , e.g., increasing the batch size or decreasing clipping norm. However, these methods are usually at a higher cost. Other attempts to improve the utilization of dataset information by relaxing the constraints of differential privacy. For example, Ebadi et al. (2015) propose personalized differentiated privacy to provide different levels of privacy protection for different users. Kotsogiannis et al. (2020) develop one-sided differential privacy that only protects sensitive users. Shi et al. (2021) introduce Selective Differential Privacy to add noise only into private data. These methods all need to know which items in the dataset contain private information, which is prohibitively expensive for large-scale datasets. There are some previous works (Xu et al., 2019; Tesfay et al., 2019) detecting sensitive information in unstructured texts, but relying on labeled keywords or reference texts.", |
|
"cite_spans": [ |
|
{ |
|
"start": 58, |
|
"end": 77, |
|
"text": "Hoory et al., 2021)", |
|
"ref_id": "BIBREF10" |
|
}, |
|
{ |
|
"start": 325, |
|
"end": 344, |
|
"text": "Ebadi et al. (2015)", |
|
"ref_id": "BIBREF9" |
|
}, |
|
{ |
|
"start": 460, |
|
"end": 486, |
|
"text": "Kotsogiannis et al. (2020)", |
|
"ref_id": "BIBREF11" |
|
}, |
|
{ |
|
"start": 562, |
|
"end": 579, |
|
"text": "Shi et al. (2021)", |
|
"ref_id": "BIBREF18" |
|
}, |
|
{ |
|
"start": 834, |
|
"end": 851, |
|
"text": "(Xu et al., 2019;", |
|
"ref_id": "BIBREF20" |
|
}, |
|
{ |
|
"start": 852, |
|
"end": 872, |
|
"text": "Tesfay et al., 2019)", |
|
"ref_id": "BIBREF19" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Related Work", |
|
"sec_num": "2" |
|
}, |
|
{ |
|
"text": "We will introduce differential privacy (Dwork, 2008; Dwork et al., 2014) , and the DP-SGD algorithm (Abadi et al., 2016) as preliminaries in this section.", |
|
"cite_spans": [ |
|
{ |
|
"start": 39, |
|
"end": 52, |
|
"text": "(Dwork, 2008;", |
|
"ref_id": "BIBREF7" |
|
}, |
|
{ |
|
"start": 53, |
|
"end": 72, |
|
"text": "Dwork et al., 2014)", |
|
"ref_id": "BIBREF8" |
|
}, |
|
{ |
|
"start": 100, |
|
"end": 120, |
|
"text": "(Abadi et al., 2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Preliminary", |
|
"sec_num": "3" |
|
}, |
|
{ |
|
"text": "Intuitively, an algorithm is (\u03f5; \u03b4)-DP if the output of the algorithm cannot be used to probabilistically determine the presence of a single record in the dataset by a factor of e \u03f5 . Formally, an algorithm A satisfies (\u03f5; \u03b4)-DP if for all datasets (D 1 ;D 2 ) that differ from each other by at least one instance, and for any set S, we have P {A(D 1 ) \u2208 S} \u2264 e \u03f5 P {A(D 2 ) \u2208 S} + \u03b4, where smaller \u03f5 values indicate a stronger privacy protection.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Differential Privacy", |
|
"sec_num": "3.1" |
|
}, |
|
{ |
|
"text": "The basic idea of DP-SGD is to clip each example gradients and add noise during model training.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DP-SGD Optimization", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "Specifically, for a batch of size L, the loss function is L", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DP-SGD Optimization", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "(\u03b8) = 1 L x i L(x i ; \u03b8).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DP-SGD Optimization", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "For each sample x i in the batch, the gradient of g(x i ) is first cut using the l 2 norm according to the gradient clipping level C, so that the maximum value of loss does not exceed C:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DP-SGD Optimization", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "g(x i ) = 1 max{1, \u2225\u2207 \u03b8 L(x i ; \u03b8)\u2225 2 /C} \u2207 \u03b8 L(x i ; \u03b8).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DP-SGD Optimization", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "(1) For a batch L t , after the sum of clipping gradients of all samples in L t is calculated, the Gaussian noise z \u223c N (0, \u03c3 2 C 2 I) is added to the sum of gradients. Hence a new gradientg Lt required for back propagation is computed as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DP-SGD Optimization", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "g Lt = 1 L ( x i g(x i ) + z t ).", |
|
"eq_num": "(2)" |
|
} |
|
], |
|
"section": "DP-SGD Optimization", |
|
"sec_num": "3.2" |
|
}, |
|
{ |
|
"text": "The smaller C can lead to more stable training. And a smaller value of \u03c3 indicates smaller noise z.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "DP-SGD Optimization", |
|
"sec_num": "3.2" |
|
}, |
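{
"text": "To make the procedure above concrete, the following is a minimal PyTorch-style sketch (our illustration, not code from this paper) of the per-example clipping in Eq. (1) and the noisy gradient averaging in Eq. (2); the function name dp_sgd_batch_gradient and its arguments are hypothetical.\n\nimport torch\n\ndef dp_sgd_batch_gradient(per_example_grads, C, sigma):\n    # per_example_grads: list of flattened per-example gradient tensors\n    clipped = []\n    for g in per_example_grads:\n        # Eq. (1): scale each gradient so that its l2 norm does not exceed C\n        clipped.append(g / max(1.0, (g.norm(2) / C).item()))\n    summed = torch.stack(clipped).sum(dim=0)\n    # Gaussian noise z ~ N(0, sigma^2 C^2 I)\n    noise = torch.normal(0.0, sigma * C, size=summed.shape)\n    # Eq. (2): noisy average gradient used for back propagation\n    return (summed + noise) / len(per_example_grads)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "DP-SGD Optimization",
"sec_num": "3.2"
},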
|
{ |
|
"text": "In this section, we will elaborate the proposed Adaptive Differential Privacy. First, we introduce a method to evaluate the privacy probability of a linguistic item. Second, we propose an adaptive noise method, which adjusts the noise magnitude according to the privacy probability of an item in DP-SGD process. Finally, an Adam gradient optimization algorithm based on adaptive noise is proposed.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adaptive Differential Privacy", |
|
"sec_num": "4" |
|
}, |
|
{ |
|
"text": "The range of privacy is not fixed but relying on its owner, which makes it hard to judge the privacy.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Privacy Probability Evaluation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "To solve this problem, we introduce the following assumption. Assumption 1: Texts containing privacy information do not occur frequently in a large dataset.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Privacy Probability Evaluation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We assume that the probability of texts containing private information is related to the frequency of texts appearing in dataset. Hence, the judgment of privacy can be transformed into the evaluation of the text frequency, which means the privacy probability of a token sequence is in direct proportion to the frequency of this sequence.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Privacy Probability Evaluation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "We then introduce a simple yet effective method to measure the frequency of text based on largescale pre-trained language models. Giving a token sequence s = x 1 , x 2 , ..., x n , the perplexity of the sequence is computed as follows:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Privacy Probability Evaluation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "P(s) = exp(\u2212 1 n n i=1 log f \u03b8 (x i |x 1 , ..., x i\u22121 )).", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Privacy Probability Evaluation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "(3) When the perplexity is low, it indicates that the average probability of text prediction is high. Large language models like GPT use a huge amount of text data for training. Hence, we consider such a large language model to be a trustworthy estimator.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Privacy Probability Evaluation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "The perplexity from a trustworthy language model is inversely proportional to the occurrence frequence of the text o(s) \u221d 1 P(s) , and the privacy probability of s is proportional to the perplexity of s: \u03c1(s) \u221d P(s). Based on this, we propose a formula for calculating the privacy probability:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Privacy Probability Evaluation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03c1(s) = normalize(P(s)),", |
|
"eq_num": "(4)" |
|
} |
|
], |
|
"section": "Privacy Probability Evaluation", |
|
"sec_num": "4.1" |
|
}, |
|
{ |
|
"text": "where s \u2208 D and normalize is a normalization operator that transforms values into probability values (i.e., falling between 0 and 1). The above method that estimates the privacy probability is not precise enough, which will inevitably cause some non-private and long-tail instances to be identified as private samples. However, from the perspective of privacy protection, such a cost is still acceptable.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Privacy Probability Evaluation", |
|
"sec_num": "4.1" |
|
}, |
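{
"text": "As a concrete illustration of Eqs. (3) and (4), the sketch below (our own, not from the paper) computes perplexity from per-token log-probabilities produced by any trusted language model and applies min-max scaling as one possible instantiation of the otherwise unspecified normalize operator.\n\nimport math\n\ndef perplexity(token_logprobs):\n    # Eq. (3): P(s) = exp(-(1/n) * sum_i log f_theta(x_i | x_1, ..., x_{i-1}))\n    return math.exp(-sum(token_logprobs) / len(token_logprobs))\n\ndef privacy_probabilities(per_sequence_logprobs):\n    # Eq. (4): rho(s) = normalize(P(s)); min-max scaling maps perplexities into [0, 1]\n    ppls = [perplexity(lp) for lp in per_sequence_logprobs]\n    lo, hi = min(ppls), max(ppls)\n    if hi == lo:\n        return [0.0 for _ in ppls]\n    return [(p - lo) / (hi - lo) for p in ppls]",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Privacy Probability Evaluation",
"sec_num": "4.1"
},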
|
{ |
|
"text": "During differential privacy training, in the batch B = s 1 , s 2 , ..., s L of size L, the privacy probability of a token sequence s i \u2208 B is \u03c1(s i ), and the Gaussian noise of B is z B = N (0, C 2 \u03c3 2 I 2 ), where \u03c3 is a noise multiplier, and C is the clipping norm. To improve the target model performance, we introduce the privacy weight to change the magnitude of Gaussian noise", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adaptive Noise", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "\u03b3 B = L i \u03c1(s i ) L .", |
|
"eq_num": "(5)" |
|
} |
|
], |
|
"section": "Adaptive Noise", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "The privacy weight denotes a privacy probability averaged over batch B. We incorporate it to the Gaussian noise:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adaptive Noise", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "z B adp = \u03b3 B \u2022 N (0, C 2 \u03c3 2 I 2 ).", |
|
"eq_num": "(6)" |
|
} |
|
], |
|
"section": "Adaptive Noise", |
|
"sec_num": "4.2" |
|
}, |
|
{ |
|
"text": "Through this method, we adaptively change the noise of every batch according to its privacy weight.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adaptive Noise", |
|
"sec_num": "4.2" |
|
}, |
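{
"text": "A minimal sketch of Eqs. (5) and (6) (our illustration; the helper name adaptive_noise is hypothetical): the batch privacy weight is the mean of the per-sequence privacy probabilities, and it rescales the standard DP Gaussian noise, so batches dominated by frequent, likely non-private sequences receive less noise.\n\nimport torch\n\ndef adaptive_noise(privacy_probs, C, sigma, shape):\n    # Eq. (5): gamma_B = average privacy probability over the batch\n    gamma_B = sum(privacy_probs) / len(privacy_probs)\n    # Eq. (6): scale the Gaussian DP noise by the batch privacy weight\n    return gamma_B * torch.normal(0.0, sigma * C, size=shape)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Adaptive Noise",
"sec_num": "4.2"
},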
|
{ |
|
"text": "With the adaptive noise, we further develop a privacy mechanism to train models. Abadi et al. (2016) propose DP-SGD that adds Gaussian noise to gradients and applies stochastic gradient descent (SGD) to train private deep learning models. We incorporate our proposed adaptive noise into DP-SGD. Such adapted framework is also suitable for other optimization algorithms such as Adam. The whole procedure of Adaptive-DP-Adam is described in Algorithm 1.", |
|
"cite_spans": [ |
|
{ |
|
"start": 81, |
|
"end": 100, |
|
"text": "Abadi et al. (2016)", |
|
"ref_id": "BIBREF0" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adaptive DP Optimization", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Algorithm 1: Adaptive-DP-Adam", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adaptive DP Optimization", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "1 Input: dataset D = {x i } N i=1 , a large language model f LM , loss function L(\u03b8) 2 Parameters: learning rate \u03b7, noise level \u03c3, batch B of size L, clipping norm C, step E, Adam parameters {\u03b8 0 , m 0 , m 1 , \u03b4 1 , \u03b4 2 } 1: Let G(\u03c6) = 0 2: for all t \u2208 T do 3:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adaptive DP Optimization", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Sample a batch B t , with sampling probability L/N", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Adaptive DP Optimization", |
|
"sec_num": "4.3" |
|
}, |
|
{ |
|
"text": "Calculate \u03b3 Bt based on Eq. (5) 5:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "4:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "for all x i \u2208 B t do 6: Clip gradients g t (x i ) \u2190 g t (x i ) \u2022 min(1, C/ \u2225g t (x i )\u2225 2 ) 7:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "4:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "end for 8:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "4:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Generate adaptive noise z t based on Eq. 69:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "4:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Calculate average gradients", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "4:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "g t (x i ) = 1 L (z t + L i=1g t (x i ))", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "4:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "10:", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "4:", |
|
"sec_num": null |
|
}, |
|
{ |
|
"text": "Update parameters \u03b8 using usual Adam 11: end for 12: return \u03b8 T 5 Experiments", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "4:", |
|
"sec_num": null |
|
}, |
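{
"text": "The sketch below condenses one step of Algorithm 1 into PyTorch-style code (our illustration under simplifying assumptions, e.g., all gradients are flattened into a single vector; for the authors' implementation see the released repository). It performs per-example clipping (line 6), generates adaptive noise from Eqs. (5) and (6) (lines 4 and 8), averages the noisy gradients (line 9), and applies a standard Adam update (line 10).\n\nimport torch\n\ndef adaptive_dp_adam_step(model, adam, batch, privacy_probs, loss_fn, C, sigma):\n    per_example_grads = []\n    for x in batch:\n        adam.zero_grad()\n        loss_fn(model, x).backward()\n        g = torch.cat([(p.grad if p.grad is not None else torch.zeros_like(p)).flatten()\n                       for p in model.parameters()])\n        per_example_grads.append(g / max(1.0, (g.norm(2) / C).item()))  # line 6\n    gamma = sum(privacy_probs) / len(privacy_probs)  # Eq. (5)\n    noise = gamma * torch.normal(0.0, sigma * C, size=per_example_grads[0].shape)  # Eq. (6)\n    avg = (torch.stack(per_example_grads).sum(dim=0) + noise) / len(batch)  # line 9\n    offset = 0\n    for p in model.parameters():  # write the noisy average back into the parameter gradients\n        n = p.numel()\n        p.grad = avg[offset:offset + n].view_as(p)\n        offset += n\n    adam.step()  # line 10: usual Adam update",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Adaptive DP Optimization",
"sec_num": "4.3"
},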
|
{ |
|
"text": "Dataset We used Wikitext-103 (Merity et al., 2016) to train our model, which is a widely used dataset for language modeling from a set of verified Good and Featured articles on Wikipedia.", |
|
"cite_spans": [ |
|
{ |
|
"start": 29, |
|
"end": 50, |
|
"text": "(Merity et al., 2016)", |
|
"ref_id": "BIBREF14" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Settings", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Baselines We have two baselines, one without DP (denoted by \"No-DP\"), and the other trained with DP-SGD (denoted by \"DP-SGD\"). We refer to our models trained with ADP-SGD as \"ADP\".", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Settings", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Hyper-parameters We used a 12-layer transformer decoder to train the language model with hidden size of 1024 and batch size of 4096, training 20 epoches with inital learning rate of 5 \u00d7 10 \u22125 . The clipping norm C was set to 0.001, and the noise multiplier \u03c3 was 1 or 5.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Settings", |
|
"sec_num": "5.1" |
|
}, |
|
{ |
|
"text": "Canary insertion is proposed by Carlini et al. (2019) , which inserts random sequences called canaries into the training dataset and calculates the exposure for the inserted canaries during testing to measure whether the model memorizes these canaries. In our setting, we injected \"My ID is 955320\" into the Wikitext-103 dataset for 10, 100, and 1000 times to make the differences between ", |
|
"cite_spans": [ |
|
{ |
|
"start": 32, |
|
"end": 53, |
|
"text": "Carlini et al. (2019)", |
|
"ref_id": "BIBREF5" |
|
} |
|
], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Canary Attacker", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "EQUATION", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [ |
|
{ |
|
"start": 0, |
|
"end": 8, |
|
"text": "EQUATION", |
|
"ref_id": "EQREF", |
|
"raw_str": "exposure = log 2 |R| \u2212 log 2 rank \u03b8 (s[r]),", |
|
"eq_num": "(7)" |
|
} |
|
], |
|
"section": "Canary Attacker", |
|
"sec_num": "5.2" |
|
}, |
|
{ |
|
"text": "where R is the set of all possible results, and rank(s[r]) is the position of s[r] in R. The lower the exposure, the safer the model is.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Canary Attacker", |
|
"sec_num": "5.2" |
|
}, |
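{
"text": "For reference, a minimal sketch of the exposure metric in Eq. (7) (our illustration; the canary rank would be obtained by ranking all candidate secrets by model perplexity, following Carlini et al. (2019)).\n\nimport math\n\ndef exposure(num_candidates, canary_rank):\n    # Eq. (7): exposure = log2 |R| - log2 rank_theta(s[r]); lower exposure means a safer model\n    return math.log2(num_candidates) - math.log2(canary_rank)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Canary Attacker",
"sec_num": "5.2"
},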
|
{ |
|
"text": "Model Performance We first evaluated models trained by different privacy settings on language modeling task. Both models were trained using a transformer decoder architecture. As shown in Table 1 , DP-SGD performs poorly, and larger noise \u03c3 further worses the model. In contrast, our ADP helps model to alleviate the decaying performance, and the utility grows when the noise multiplier \u03c3 is large. Although the privacy guarantee \u03f5 of ADP increases compared to DP-SGD when the noise multiplier \u03c3 is 1 and 5, the privacy guarantee of ADP is within the acceptable range. It suggests that our ADP can improve the performance of differentially private language models with tight privacy guarantee.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 188, |
|
"end": 195, |
|
"text": "Table 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "Protection Against Attacker Our second group of experiments, described in section 5.2, is to test the model memorization of private information. We evaluated models trained on the Wikitext-103 dataset injected canaries. We used text generation to evaluate the exposure of canaries from different language models. As can be seen from Figure 1, even when private item appears as many as 1000 times in the data, the ADP model performs significantly better than the non-DP model. However, exposures of the ADP model are larger than the DP-SGD model. It suggests that ADP method can protect privacy information from leaking from training data, but the protection performance is slightly worse than DP-SGD. Figure 1 : The exposure of canaries from different language models. All models were trained for 20 epoches.", |
|
"cite_spans": [], |
|
"ref_spans": [ |
|
{ |
|
"start": 333, |
|
"end": 339, |
|
"text": "Figure", |
|
"ref_id": null |
|
}, |
|
{ |
|
"start": 701, |
|
"end": 709, |
|
"text": "Figure 1", |
|
"ref_id": null |
|
} |
|
], |
|
"eq_spans": [], |
|
"section": "Results", |
|
"sec_num": "5.3" |
|
}, |
|
{ |
|
"text": "We have presented a new method to estimate the privacy probability of a linguistic item when the privacy information of the dataset is not known. With estimated privacy probabilities, we propose adaptive differential privacy (ADP), to improve the model utility. We also present a privacy optimization algorithm, Adaptive-DP-Adam, to train differentially private models. Our experiments show that models trained with ADP achieve better utilities than traditional DP and are capable of protecting sensitive information from being leaked.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Conclusion", |
|
"sec_num": "6" |
|
}, |
|
{ |
|
"text": "Code is available at https://github.com/ flamewei123/ADP.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "", |
|
"sec_num": null |
|
} |
|
], |
|
"back_matter": [ |
|
{ |
|
"text": "The work was partially supported by a ByteDance Research Collaboration Project (PJ20210625900030) and the Natural Science Foundation of Tianjin (Grant No. 19JCZDJC31400). We would like to thank the anonymous reviewers for their insightful comments.", |
|
"cite_spans": [], |
|
"ref_spans": [], |
|
"eq_spans": [], |
|
"section": "Acknowledgements", |
|
"sec_num": null |
|
} |
|
], |
|
"bib_entries": { |
|
"BIBREF0": { |
|
"ref_id": "b0", |
|
"title": "Deep learning with differential privacy", |
|
"authors": [ |
|
{ |
|
"first": "Martin", |
|
"middle": [], |
|
"last": "Abadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Andy", |
|
"middle": [], |
|
"last": "Chu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ian", |
|
"middle": [], |
|
"last": "Goodfellow", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Brendan", |
|
"middle": [], |
|
"last": "Mcmahan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Mironov", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kunal", |
|
"middle": [], |
|
"last": "Talwar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "Proceedings of the 2016 ACM SIGSAC conference on computer and communications security", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "308--318", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Martin Abadi, Andy Chu, Ian Goodfellow, H Bren- dan McMahan, Ilya Mironov, Kunal Talwar, and Li Zhang. 2016. Deep learning with differential pri- vacy. In Proceedings of the 2016 ACM SIGSAC con- ference on computer and communications security, pages 308-318.", |
|
"links": null |
|
}, |
|
"BIBREF3": { |
|
"ref_id": "b3", |
|
"title": "On the opportunities and risks of foundation models", |
|
"authors": [ |
|
{ |
|
"first": "Jeannette", |
|
"middle": [], |
|
"last": "Michael S Bernstein", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Antoine", |
|
"middle": [], |
|
"last": "Bohg", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Emma", |
|
"middle": [], |
|
"last": "Bosselut", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Brunskill", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Michael S Bernstein, Jeannette Bohg, Antoine Bosselut, Emma Brunskill, et al. 2021. On the opportunities and risks of foundation models. https://openreview.net/forum?id=NTs-oIaO6O.", |
|
"links": null |
|
}, |
|
"BIBREF4": { |
|
"ref_id": "b4", |
|
"title": "Language models are few-shot learners", |
|
"authors": [ |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Brown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Benjamin", |
|
"middle": [], |
|
"last": "Mann", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Nick", |
|
"middle": [], |
|
"last": "Ryder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Melanie", |
|
"middle": [], |
|
"last": "Subbiah", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jared", |
|
"middle": [ |
|
"D" |
|
], |
|
"last": "Kaplan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Prafulla", |
|
"middle": [], |
|
"last": "Dhariwal", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Arvind", |
|
"middle": [], |
|
"last": "Neelakantan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Pranav", |
|
"middle": [], |
|
"last": "Shyam", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Girish", |
|
"middle": [], |
|
"last": "Sastry", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amanda", |
|
"middle": [], |
|
"last": "Askell", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Advances in neural information processing systems", |
|
"volume": "33", |
|
"issue": "", |
|
"pages": "1877--1901", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, et al. 2020. Language models are few-shot learners. Advances in neural information processing systems, 33:1877-1901.", |
|
"links": null |
|
}, |
|
"BIBREF5": { |
|
"ref_id": "b5", |
|
"title": "The secret sharer: Evaluating and testing unintended memorization in neural networks", |
|
"authors": [ |
|
{ |
|
"first": "Nicholas", |
|
"middle": [], |
|
"last": "Carlini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chang", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "\u00dalfar", |
|
"middle": [], |
|
"last": "Erlingsson", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jernej", |
|
"middle": [], |
|
"last": "Kos", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dawn", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "28th USENIX Security Symposium (USENIX Security 19)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "267--284", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nicholas Carlini, Chang Liu, \u00dalfar Erlingsson, Jernej Kos, and Dawn Song. 2019. The secret sharer: Eval- uating and testing unintended memorization in neu- ral networks. In 28th USENIX Security Symposium (USENIX Security 19), pages 267-284.", |
|
"links": null |
|
}, |
|
"BIBREF6": { |
|
"ref_id": "b6", |
|
"title": "Extracting training data from large language models", |
|
"authors": [ |
|
{ |
|
"first": "Nicholas", |
|
"middle": [], |
|
"last": "Carlini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "Tramer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Eric", |
|
"middle": [], |
|
"last": "Wallace", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Matthew", |
|
"middle": [], |
|
"last": "Jagielski", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ariel", |
|
"middle": [], |
|
"last": "Herbert-Voss", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Katherine", |
|
"middle": [], |
|
"last": "Lee", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Adam", |
|
"middle": [], |
|
"last": "Roberts", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tom", |
|
"middle": [], |
|
"last": "Brown", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dawn", |
|
"middle": [], |
|
"last": "Song", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ulfar", |
|
"middle": [], |
|
"last": "Erlingsson", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "30th USENIX Security Symposium (USENIX Security 21)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "2633--2650", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Nicholas Carlini, Florian Tramer, Eric Wallace, Matthew Jagielski, Ariel Herbert-Voss, Katherine Lee, Adam Roberts, Tom Brown, Dawn Song, Ulfar Erlingsson, et al. 2021. Extracting training data from large language models. In 30th USENIX Security Symposium (USENIX Security 21), pages 2633-2650.", |
|
"links": null |
|
}, |
|
"BIBREF7": { |
|
"ref_id": "b7", |
|
"title": "Differential privacy: A survey of results", |
|
"authors": [ |
|
{ |
|
"first": "Cynthia", |
|
"middle": [], |
|
"last": "Dwork", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2008, |
|
"venue": "International conference on theory and applications of models of computation", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1--19", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cynthia Dwork. 2008. Differential privacy: A survey of results. In International conference on theory and applications of models of computation, pages 1-19. Springer.", |
|
"links": null |
|
}, |
|
"BIBREF8": { |
|
"ref_id": "b8", |
|
"title": "The algorithmic foundations of differential privacy", |
|
"authors": [ |
|
{ |
|
"first": "Cynthia", |
|
"middle": [], |
|
"last": "Dwork", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aaron", |
|
"middle": [], |
|
"last": "Roth", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2014, |
|
"venue": "Found. Trends Theor. Comput. Sci", |
|
"volume": "9", |
|
"issue": "3-4", |
|
"pages": "211--407", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Cynthia Dwork, Aaron Roth, et al. 2014. The algo- rithmic foundations of differential privacy. Found. Trends Theor. Comput. Sci., 9(3-4):211-407.", |
|
"links": null |
|
}, |
|
"BIBREF9": { |
|
"ref_id": "b9", |
|
"title": "Differential privacy: Now it's getting personal", |
|
"authors": [ |
|
{ |
|
"first": "Hamid", |
|
"middle": [], |
|
"last": "Ebadi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Sands", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Gerardo", |
|
"middle": [], |
|
"last": "Schneider", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2015, |
|
"venue": "Acm Sigplan Notices", |
|
"volume": "50", |
|
"issue": "1", |
|
"pages": "69--81", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Hamid Ebadi, David Sands, and Gerardo Schneider. 2015. Differential privacy: Now it's getting personal. Acm Sigplan Notices, 50(1):69-81.", |
|
"links": null |
|
}, |
|
"BIBREF10": { |
|
"ref_id": "b10", |
|
"title": "Learning and evaluating a differentially private pre-trained language model", |
|
"authors": [ |
|
{ |
|
"first": "Shlomo", |
|
"middle": [], |
|
"last": "Hoory", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Amir", |
|
"middle": [], |
|
"last": "Feder", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Avichai", |
|
"middle": [], |
|
"last": "Tendler", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sofia", |
|
"middle": [], |
|
"last": "Erell", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Alon", |
|
"middle": [], |
|
"last": "Peled-Cohen", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Itay", |
|
"middle": [], |
|
"last": "Laish", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hootan", |
|
"middle": [], |
|
"last": "Nakhost", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Uri", |
|
"middle": [], |
|
"last": "Stemmer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ayelet", |
|
"middle": [], |
|
"last": "Benjamini", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Avinatan", |
|
"middle": [], |
|
"last": "Hassidim", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Findings of the Association for Computational Linguistics: EMNLP 2021", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1178--1189", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Shlomo Hoory, Amir Feder, Avichai Tendler, Sofia Erell, Alon Peled-Cohen, Itay Laish, Hootan Nakhost, Uri Stemmer, Ayelet Benjamini, Avinatan Hassidim, et al. 2021. Learning and evaluating a differentially pri- vate pre-trained language model. In Findings of the Association for Computational Linguistics: EMNLP 2021, pages 1178-1189.", |
|
"links": null |
|
}, |
|
"BIBREF11": { |
|
"ref_id": "b11", |
|
"title": "Ashwin Machanavajjhala, and Sharad Mehrotra", |
|
"authors": [ |
|
{ |
|
"first": "Ios", |
|
"middle": [], |
|
"last": "Kotsogiannis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Stelios", |
|
"middle": [], |
|
"last": "Doudalis", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Sam", |
|
"middle": [], |
|
"last": "Haney", |
|
"suffix": "" |
|
},
{
"first": "Ashwin",
"middle": [],
"last": "Machanavajjhala",
"suffix": ""
},
{
"first": "Sharad",
"middle": [],
"last": "Mehrotra",
"suffix": ""
}
|
], |
|
"year": 2020, |
|
"venue": "2020 IEEE 36th International Conference on Data Engineering (ICDE)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "493--504", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Ios Kotsogiannis, Stelios Doudalis, Sam Haney, Ash- win Machanavajjhala, and Sharad Mehrotra. 2020. One-sided differential privacy. In 2020 IEEE 36th In- ternational Conference on Data Engineering (ICDE), pages 493-504. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF12": { |
|
"ref_id": "b12", |
|
"title": "Large language models can be strong differentially private learners", |
|
"authors": [ |
|
{ |
|
"first": "Xuechen", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Florian", |
|
"middle": [], |
|
"last": "Tramer", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Percy", |
|
"middle": [], |
|
"last": "Liang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Tatsunori", |
|
"middle": [], |
|
"last": "Hashimoto", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Xuechen Li, Florian Tramer, Percy Liang, and Tat- sunori Hashimoto. 2021. Large language mod- els can be strong differentially private learners. https://openreview.net/forum?id=bVuP3ltATMz.", |
|
"links": null |
|
}, |
|
"BIBREF13": { |
|
"ref_id": "b13", |
|
"title": "Learning differentially private recurrent language models", |
|
"authors": [ |
|
{ |
|
"first": "Daniel", |
|
"middle": [], |
|
"last": "H Brendan Mcmahan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kunal", |
|
"middle": [], |
|
"last": "Ramage", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Li", |
|
"middle": [], |
|
"last": "Talwar", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2018, |
|
"venue": "International Conference on Learning Representations", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "H Brendan McMahan, Daniel Ramage, Kunal Talwar, and Li Zhang. 2018. Learning differentially private recurrent language models. In International Confer- ence on Learning Representations.", |
|
"links": null |
|
}, |
|
"BIBREF14": { |
|
"ref_id": "b14", |
|
"title": "Pointer sentinel mixture models", |
|
"authors": [ |
|
{ |
|
"first": "Stephen", |
|
"middle": [], |
|
"last": "Merity", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Caiming", |
|
"middle": [], |
|
"last": "Xiong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "James", |
|
"middle": [], |
|
"last": "Bradbury", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Richard", |
|
"middle": [], |
|
"last": "Socher", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2016, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Stephen Merity, Caiming Xiong, James Bradbury, and Richard Socher. 2016. Pointer sentinel mixture mod- els. https://openreview.net/forum?id=Byj72udxe.", |
|
"links": null |
|
}, |
|
"BIBREF15": { |
|
"ref_id": "b15", |
|
"title": "Sensitive Information Detection: Recursive Neural Networks for Encoding Context", |
|
"authors": [], |
|
"year": 2020, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Jan Neerbek. 2020. Sensitive Information Detection: Recursive Neural Networks for Encoding Context. Ph.D. thesis, Aarhus University.", |
|
"links": null |
|
}, |
|
"BIBREF16": { |
|
"ref_id": "b16", |
|
"title": "Natural language understanding with privacy-preserving bert", |
|
"authors": [ |
|
{ |
|
"first": "Chen", |
|
"middle": [], |
|
"last": "Qu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Weize", |
|
"middle": [], |
|
"last": "Kong", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Liu", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Mingyang", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Michael", |
|
"middle": [], |
|
"last": "Bendersky", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Marc", |
|
"middle": [], |
|
"last": "Najork", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "Proceedings of the 30th ACM International Conference on Information & Knowledge Management", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "1488--1497", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Chen Qu, Weize Kong, Liu Yang, Mingyang Zhang, Michael Bendersky, and Marc Najork. 2021. Natural language understanding with privacy-preserving bert. In Proceedings of the 30th ACM International Con- ference on Information & Knowledge Management, pages 1488-1497.", |
|
"links": null |
|
}, |
|
"BIBREF17": { |
|
"ref_id": "b17", |
|
"title": "Language models are unsupervised multitask learners", |
|
"authors": [ |
|
{ |
|
"first": "Alec", |
|
"middle": [], |
|
"last": "Radford", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jeffrey", |
|
"middle": [], |
|
"last": "Wu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Rewon", |
|
"middle": [], |
|
"last": "Child", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "David", |
|
"middle": [], |
|
"last": "Luan", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Dario", |
|
"middle": [], |
|
"last": "Amodei", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ilya", |
|
"middle": [], |
|
"last": "Sutskever", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "OpenAI blog", |
|
"volume": "1", |
|
"issue": "8", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Alec Radford, Jeffrey Wu, Rewon Child, David Luan, Dario Amodei, Ilya Sutskever, et al. 2019. Language models are unsupervised multitask learners. OpenAI blog, 1(8):9.", |
|
"links": null |
|
}, |
|
"BIBREF18": { |
|
"ref_id": "b18", |
|
"title": "Selective differential privacy for language modeling", |
|
"authors": [ |
|
{ |
|
"first": "Weiyan", |
|
"middle": [], |
|
"last": "Shi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Aiqi", |
|
"middle": [], |
|
"last": "Cui", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Evan", |
|
"middle": [], |
|
"last": "Li", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Ruoxi", |
|
"middle": [], |
|
"last": "Jia", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhou", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": { |
|
"arXiv": [ |
|
"arXiv:2108.12944" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Weiyan Shi, Aiqi Cui, Evan Li, Ruoxi Jia, and Zhou Yu. 2021. Selective differential privacy for language modeling. arXiv preprint arXiv:2108.12944.", |
|
"links": null |
|
}, |
|
"BIBREF19": { |
|
"ref_id": "b19", |
|
"title": "Privacybot: Detecting privacy sensitive information in unstructured texts", |
|
"authors": [ |
|
{ |
|
"first": "Jetzabel", |
|
"middle": [], |
|
"last": "Welderufael B Tesfay", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kai", |
|
"middle": [], |
|
"last": "Serna", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "", |
|
"middle": [], |
|
"last": "Rannenberg", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "Sixth International Conference on Social Networks Analysis, Management and Security (SNAMS)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "53--60", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Welderufael B Tesfay, Jetzabel Serna, and Kai Rannen- berg. 2019. Privacybot: Detecting privacy sensitive information in unstructured texts. In 2019 Sixth In- ternational Conference on Social Networks Analysis, Management and Security (SNAMS), pages 53-60. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF20": { |
|
"ref_id": "b20", |
|
"title": "Detecting sensitive information of unstructured text using convolutional neural network", |
|
"authors": [ |
|
{ |
|
"first": "Guosheng", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chunhao", |
|
"middle": [], |
|
"last": "Qi", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hai", |
|
"middle": [], |
|
"last": "Yu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Shengwei", |
|
"middle": [], |
|
"last": "Xu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Chunlu", |
|
"middle": [], |
|
"last": "Zhao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jing", |
|
"middle": [], |
|
"last": "Yuan", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2019, |
|
"venue": "2019 International Conference on Cyber-Enabled Distributed Computing and Knowledge Discovery (CyberC)", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "474--479", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Guosheng Xu, Chunhao Qi, Hai Yu, Shengwei Xu, Chunlu Zhao, and Jing Yuan. 2019. Detecting sen- sitive information of unstructured text using convo- lutional neural network. In 2019 International Con- ference on Cyber-Enabled Distributed Computing and Knowledge Discovery (CyberC), pages 474-479. IEEE.", |
|
"links": null |
|
}, |
|
"BIBREF21": { |
|
"ref_id": "b21", |
|
"title": "Pangu: Large-scale autoregressive pretrained chinese language models with auto-parallel computation", |
|
"authors": [ |
|
{ |
|
"first": "Wei", |
|
"middle": [], |
|
"last": "Zeng", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaozhe", |
|
"middle": [], |
|
"last": "Ren", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Teng", |
|
"middle": [], |
|
"last": "Su", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Hui", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Yi", |
|
"middle": [], |
|
"last": "Liao", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhiwei", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xin", |
|
"middle": [], |
|
"last": "Jiang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Zhenzhang", |
|
"middle": [], |
|
"last": "Yang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Kaisheng", |
|
"middle": [], |
|
"last": "Wang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Xiaoda", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2021, |
|
"venue": "", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "", |
|
"other_ids": {}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Wei Zeng, Xiaozhe Ren, Teng Su, Hui Wang, Yi Liao, Zhiwei Wang, Xin Jiang, ZhenZhang Yang, Kaisheng Wang, Xiaoda Zhang, et al. 2021. Pangu: Large-scale autoregressive pretrained chi- nese language models with auto-parallel computation. https://openreview.net/forum?id=-AArJ2Qrh38.", |
|
"links": null |
|
}, |
|
"BIBREF22": { |
|
"ref_id": "b22", |
|
"title": "BERT-XML: Large scale automated ICD coding using BERT pretraining", |
|
"authors": [ |
|
{ |
|
"first": "Zachariah", |
|
"middle": [], |
|
"last": "Zhang", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Jingshu", |
|
"middle": [], |
|
"last": "Liu", |
|
"suffix": "" |
|
}, |
|
{ |
|
"first": "Narges", |
|
"middle": [], |
|
"last": "Razavian", |
|
"suffix": "" |
|
} |
|
], |
|
"year": 2020, |
|
"venue": "Proceedings of the 3rd Clinical Natural Language Processing Workshop", |
|
"volume": "", |
|
"issue": "", |
|
"pages": "24--34", |
|
"other_ids": { |
|
"DOI": [ |
|
"10.18653/v1/2020.clinicalnlp-1.3" |
|
] |
|
}, |
|
"num": null, |
|
"urls": [], |
|
"raw_text": "Zachariah Zhang, Jingshu Liu, and Narges Razavian. 2020. BERT-XML: Large scale automated ICD cod- ing using BERT pretraining. In Proceedings of the 3rd Clinical Natural Language Processing Workshop, pages 24-34, Online. Association for Computational Linguistics.", |
|
"links": null |
|
} |
|
}, |
|
"ref_entries": {} |
|
} |
|
} |