Duplicate from openai/openai-detector
Co-authored-by: Adrien <[email protected]>
- .gitattributes +34 -0
- .gitignore +2 -0
- Dockerfile +41 -0
- README.md +53 -0
- baseline.py +57 -0
- detection.md +50 -0
- detector-base.pt +3 -0
- detector/README.md +51 -0
- detector/__pycache__/server.cpython-37.pyc +0 -0
- detector/dataset.py +86 -0
- detector/download.py +49 -0
- detector/index.html +158 -0
- detector/server.py +155 -0
- detector/train.py +305 -0
- detector/utils.py +62 -0
- download_dataset.py +29 -0
- images/detection_by_length.png +0 -0
- images/parts_of_speech.png +0 -0
- images/self_detection_k40.png +0 -0
- images/self_detection_t1.png +0 -0
- post_endpoint.patch +0 -0
- requirements.txt +37 -0
.gitattributes
ADDED
@@ -0,0 +1,34 @@
+*.7z filter=lfs diff=lfs merge=lfs -text
+*.arrow filter=lfs diff=lfs merge=lfs -text
+*.bin filter=lfs diff=lfs merge=lfs -text
+*.bz2 filter=lfs diff=lfs merge=lfs -text
+*.ckpt filter=lfs diff=lfs merge=lfs -text
+*.ftz filter=lfs diff=lfs merge=lfs -text
+*.gz filter=lfs diff=lfs merge=lfs -text
+*.h5 filter=lfs diff=lfs merge=lfs -text
+*.joblib filter=lfs diff=lfs merge=lfs -text
+*.lfs.* filter=lfs diff=lfs merge=lfs -text
+*.mlmodel filter=lfs diff=lfs merge=lfs -text
+*.model filter=lfs diff=lfs merge=lfs -text
+*.msgpack filter=lfs diff=lfs merge=lfs -text
+*.npy filter=lfs diff=lfs merge=lfs -text
+*.npz filter=lfs diff=lfs merge=lfs -text
+*.onnx filter=lfs diff=lfs merge=lfs -text
+*.ot filter=lfs diff=lfs merge=lfs -text
+*.parquet filter=lfs diff=lfs merge=lfs -text
+*.pb filter=lfs diff=lfs merge=lfs -text
+*.pickle filter=lfs diff=lfs merge=lfs -text
+*.pkl filter=lfs diff=lfs merge=lfs -text
+*.pt filter=lfs diff=lfs merge=lfs -text
+*.pth filter=lfs diff=lfs merge=lfs -text
+*.rar filter=lfs diff=lfs merge=lfs -text
+*.safetensors filter=lfs diff=lfs merge=lfs -text
+saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+*.tar.* filter=lfs diff=lfs merge=lfs -text
+*.tflite filter=lfs diff=lfs merge=lfs -text
+*.tgz filter=lfs diff=lfs merge=lfs -text
+*.wasm filter=lfs diff=lfs merge=lfs -text
+*.xz filter=lfs diff=lfs merge=lfs -text
+*.zip filter=lfs diff=lfs merge=lfs -text
+*.zst filter=lfs diff=lfs merge=lfs -text
+*tfevents* filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,2 @@
+.mypy_cache/
+data/
Dockerfile
ADDED
@@ -0,0 +1,41 @@
+# syntax=docker/dockerfile:1.4
+FROM nvidia/cuda:11.3.1-cudnn8-devel-ubuntu18.04
+# BEGIN Static part
+ENV DEBIAN_FRONTEND=noninteractive \
+    TZ=Europe/Paris
+
+RUN apt-get update && apt-get install -y \
+    git \
+    make build-essential libssl-dev zlib1g-dev \
+    libbz2-dev libreadline-dev libsqlite3-dev wget curl llvm \
+    libncursesw5-dev xz-utils tk-dev libxml2-dev libxmlsec1-dev libffi-dev liblzma-dev git-lfs \
+    ffmpeg libsm6 libxext6 cmake libgl1-mesa-glx \
+    && rm -rf /var/lib/apt/lists/* \
+    && git lfs install
+
+# User
+RUN useradd -m -u 1000 user
+USER user
+ENV HOME=/home/user \
+    PATH=/home/user/.local/bin:$PATH
+WORKDIR /home/user/app
+
+# Pyenv
+RUN curl https://pyenv.run | bash
+ENV PATH=$HOME/.pyenv/shims:$HOME/.pyenv/bin:$PATH
+
+# Python
+RUN pyenv install 3.7.5 && \
+    pyenv global 3.7.5 && \
+    pyenv rehash && \
+    pip install --no-cache-dir --upgrade pip setuptools wheel && \
+    pip install --no-cache-dir \
+    datasets \
+    huggingface-hub "protobuf<4" "click<8.1"
+
+COPY --link --chown=1000 requirements.txt /home/user/app/requirements.txt
+RUN pip install --no-cache-dir -r requirements.txt
+
+COPY --link --chown=1000 ./ /home/user/app
+
+CMD ["python", "-m", "detector.server", "detector-base.pt", "--port=7860"]
README.md
ADDED
@@ -0,0 +1,53 @@
+---
+title: openai-detector
+emoji: 👁️
+colorFrom: blue
+colorTo: red
+sdk: docker
+duplicated_from: openai/openai-detector
+---
+# gpt-2-output-dataset
+
+This dataset contains:
+- 250K documents from the WebText test set
+- For each GPT-2 model (trained on the WebText training set), 250K random samples (temperature 1, no truncation) and 250K samples generated with Top-K 40 truncation
+
+We look forward to the research produced using this data!
+
+### Download
+
+For each model, we have a training split of 250K generated examples, as well as validation and test splits of 5K examples.
+
+All data is located in Google Cloud Storage, under the directory `gs://gpt-2/output-dataset/v1`.
+
+There, you will find files:
+
+- `webtext.${split}.jsonl`
+- `small-117M.${split}.jsonl`
+- `small-117M-k40.${split}.jsonl`
+- `medium-345M.${split}.jsonl`
+- `medium-345M-k40.${split}.jsonl`
+- `large-762M.${split}.jsonl`
+- `large-762M-k40.${split}.jsonl`
+- `xl-1542M.${split}.jsonl`
+- `xl-1542M-k40.${split}.jsonl`
+
+where `split` is one of `train`, `test`, and `valid`.
+
+We've provided a script to download all of them, in `download_dataset.py`.
+
+#### Finetuned model samples
+
+Additionally, we encourage research on detection of finetuned models. We have released data under `gs://gpt-2/output-dataset/v1-amazonfinetune/` with samples from a GPT-2 full model finetuned to output Amazon reviews.
+
+### Detectability baselines
+
+We're interested in seeing research in detectability of GPT-2 model family generations.
+
+We provide some [initial analysis](detection.md) of two baselines, as well as [code](./baseline.py) for the better baseline.
+
+Overall, we are able to achieve accuracies in the mid-90s for Top-K 40 generations, and mid-70s to high-80s (depending on model size) for random generations. We also find some evidence that adversaries can evade detection via finetuning from released models.
+
+### Data removal requests
+
+If you believe your work is included in WebText and would like us to remove it, please let us know at [email protected].
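As a sanity check on the format: each split file is newline-delimited JSON with one document per line under a `text` key, which is the same convention `baseline.py` below relies on. A minimal loading sketch, assuming the files have been downloaded into a local `data/` directory:

```python
import json

def load_split(path):
    # each line is a JSON object; the document body lives under 'text'
    with open(path) as f:
        return [json.loads(line)['text'] for line in f]

webtext_valid = load_split('data/webtext.valid.jsonl')
print(len(webtext_valid), webtext_valid[0][:80])
```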
baseline.py
ADDED
@@ -0,0 +1,57 @@
+import os
+import json
+
+import fire
+import numpy as np
+from scipy import sparse
+
+from sklearn.model_selection import PredefinedSplit, GridSearchCV
+from sklearn.linear_model import LogisticRegression
+from sklearn.feature_extraction.text import TfidfVectorizer
+
+def _load_split(data_dir, source, split, n=np.inf):
+    path = os.path.join(data_dir, f'{source}.{split}.jsonl')
+    texts = []
+    for i, line in enumerate(open(path)):
+        if i >= n:
+            break
+        texts.append(json.loads(line)['text'])
+    return texts
+
+def load_split(data_dir, source, split, n=np.inf):
+    webtext = _load_split(data_dir, 'webtext', split, n=n//2)
+    gen = _load_split(data_dir, source, split, n=n//2)
+    texts = webtext + gen
+    labels = [0]*len(webtext) + [1]*len(gen)
+    return texts, labels
+
+def main(data_dir, log_dir, source='xl-1542M-k40', n_train=500000, n_valid=10000, n_jobs=None, verbose=False):
+    train_texts, train_labels = load_split(data_dir, source, 'train', n=n_train)
+    valid_texts, valid_labels = load_split(data_dir, source, 'valid', n=n_valid)
+    test_texts, test_labels = load_split(data_dir, source, 'test')
+
+    vect = TfidfVectorizer(ngram_range=(1, 2), min_df=5, max_features=2**21)
+    train_features = vect.fit_transform(train_texts)
+    valid_features = vect.transform(valid_texts)
+    test_features = vect.transform(test_texts)
+
+    model = LogisticRegression(solver='liblinear')
+    params = {'C': [1/64, 1/32, 1/16, 1/8, 1/4, 1/2, 1, 2, 4, 8, 16, 32, 64]}
+    split = PredefinedSplit([-1]*n_train + [0]*n_valid)
+    search = GridSearchCV(model, params, cv=split, n_jobs=n_jobs, verbose=verbose, refit=False)
+    search.fit(sparse.vstack([train_features, valid_features]), train_labels + valid_labels)
+    model = model.set_params(**search.best_params_)
+    model.fit(train_features, train_labels)
+    valid_accuracy = model.score(valid_features, valid_labels)*100.
+    test_accuracy = model.score(test_features, test_labels)*100.
+    data = {
+        'source': source,
+        'n_train': n_train,
+        'valid_accuracy': valid_accuracy,
+        'test_accuracy': test_accuracy
+    }
+    print(data)
+    json.dump(data, open(os.path.join(log_dir, f'{source}.json'), 'w'))
+
+if __name__ == '__main__':
+    fire.Fire(main)
detection.md
ADDED
@@ -0,0 +1,50 @@
+We encourage you to try improving our baselines. Please let us know if you have questions or find any interesting results!
+
+## Simple baseline
+
+We've provided a starter baseline which trains a logistic regression detector on TF-IDF unigram and bigram features, in [`baseline.py`](./baseline.py).
+
+### Initial Analysis
+
+The baseline achieves the following accuracies:
+
+| Model | Temperature 1 | Top-K 40 |
+| ----- | ------ | ------ |
+| 117M | 88.29% | 96.79% |
+| 345M | 88.94% | 95.22% |
+| 762M | 77.16% | 94.43% |
+| 1542M | 74.31% | 92.69% |
+
+<img src="images/detection_by_length.png" width="475" height="335" title="Impact of Document Length">
+
+Unsurprisingly, shorter documents are harder to detect, and performance improves gradually with length. Detection accuracy for short documents of around 500 characters (a long paragraph) is about 15% lower.
+
+<img src="images/parts_of_speech.png" width="482" height="300" title="Part of Speech Analysis">
+
+Truncated sampling, which is commonly used for high-quality generations from the GPT-2 model family, results in a shift in the part of speech distribution of the generated text compared to real text. A clear example is the underuse of proper nouns and overuse of pronouns, which are more generic. This shift contributes to the 8% to 18% higher detection rate of Top-K samples compared to random samples across models.
+
+### Finetuning
+
+When run on samples from the finetuned GPT-2 full model, detection rate falls from 92.7% to 70.2% for Top-K 40 generations. Note that about half of this drop is accounted for by length, since Amazon reviews are shorter than WebText documents.
+
+## "Zero-shot" baseline
+
+We attempt a second baseline which uses a language model to evaluate total log probability, and thresholds based on this probability. This baseline underperforms relative to the simple baseline. However, we are interested in further variants, such as binning per-token log probabilities.
+
+### Initial analysis
+
+Here, we show results of log-prob-based detection for both standard (t=1) and Top-K 40 generations.
+<img src="images/self_detection_t1.png" width="300" height="300" title="Accuracy with standard (t=1) generations">
+<img src="images/self_detection_k40.png" width="300" height="300" title="Accuracy with Top-K 40 generations">
+
+The main result is that GPT-2 detects itself 81.8% of the time in the easy case of Top-K 40 generations. This is pretty constant across model sizes. All underperform relative to the simple baseline.
+
+For random samples, results are unsurprising. Bigger models are better able to realize that generated text is still kind of weird and "random". Detection rates also go down as generators get better.
+
+For Top-K 40, results are perhaps more surprising. Using a bigger model as a discriminator does not really improve detection rates across the board (the smallest GPT-2 model does as well at detecting full GPT-2 as full GPT-2), and a bigger model does not "detect down well" - that is, full GPT-2 is actually kind of bad at detecting an adversary using small GPT-2.
+
+An important difference is that while in the random samples case, generations are less likely than real data, in the Top-K 40 case, they are more likely.
+
+### Finetuning
+
+When detecting samples from our finetuned GPT-2 full model using GPT-2 full, we observe a 63.2% detection rate on random samples (drop of 13%) and 76.2% detection rate with Top-K 40 samples (drop of 5.6%).
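For reference, the thresholding idea behind this "zero-shot" baseline can be sketched in a few lines. This is a hedged illustration, not the code used to produce the numbers above; the threshold is a placeholder that would have to be fit on held-out data, and as noted above the useful direction of the comparison differs between random and Top-K 40 samples:

```python
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer

tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2LMHeadModel.from_pretrained('gpt2')
model.eval()

def mean_log_prob(text):
    # average per-token log probability of the text under GPT-2
    input_ids = torch.tensor([tokenizer.encode(text)[:1024]])  # truncate to the context window
    with torch.no_grad():
        outputs = model(input_ids, labels=input_ids)
    return -outputs[0].item()  # outputs[0] is the mean negative log-likelihood

def looks_generated(text, threshold=-3.0):  # placeholder threshold, fit on held-out data
    return mean_log_prob(text) > threshold
```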
detector-base.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c74935bd6568940038e6bfcc9c90bf821d7ae4163ebf2327b73db2f641376376
+size 501001061
detector/README.md
ADDED
@@ -0,0 +1,51 @@
+GPT-2 Output Detector
+=====================
+
+This directory contains the code for working with the GPT-2 output detector model, obtained by fine-tuning a
+[RoBERTa model](https://ai.facebook.com/blog/roberta-an-optimized-method-for-pretraining-self-supervised-nlp-systems/)
+with [the outputs of the 1.5B-parameter GPT-2 model](https://github.com/openai/gpt-2-output-dataset).
+For motivations and discussions regarding the release of this detector model, please check out
+[our blog post](https://openai.com/blog/gpt-2-1-5b-release/) and [report](https://d4mucfpksywv.cloudfront.net/papers/GPT_2_Report.pdf).
+
+## Downloading a pre-trained detector model
+
+Download the weights for the fine-tuned `roberta-base` model (478 MB):
+
+```bash
+wget https://storage.googleapis.com/gpt-2/detector-models/v1/detector-base.pt
+```
+
+or `roberta-large` model (1.5 GB):
+
+```bash
+wget https://storage.googleapis.com/gpt-2/detector-models/v1/detector-large.pt
+```
+
+These RoBERTa-based models are fine-tuned with a mixture of temperature-1 and nucleus sampling outputs,
+which should generalize well to outputs generated using different sampling methods.
+
+## Running a detector model
+
+You can launch a web UI in which you can enter a text and see the detector model's prediction
+on whether or not it was generated by a GPT-2 model.
+
+```bash
+# (on the top-level directory of this repository)
+pip install -r requirements.txt
+python -m detector.server detector-base.pt
+```
+
+After the script says "Ready to serve", navigate to http://localhost:8080 to view the UI.
+
+## Training a new detector model
+
+You can use the provided training script to train a detector model on a new set of datasets.
+We recommend using a GPU machine for this task.
+
+```bash
+# (on the top-level directory of this repository)
+pip install -r requirements.txt
+python -m detector.train
+```
+
+The training script supports a number of different options; append `--help` to the command above for usage.
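The server also answers programmatic requests: a GET request whose query string is the raw text returns the prediction as JSON, which mirrors what `index.html` does. A small sketch using `requests`, assuming the server is running locally on the default port:

```python
import requests
from urllib.parse import quote

text = "The quick brown fox jumps over the lazy dog."
# the server treats everything after '?' as the text to score
resp = requests.get('http://localhost:8080/?' + quote(text))
print(resp.json())
# {'all_tokens': ..., 'used_tokens': ..., 'real_probability': ..., 'fake_probability': ...}
```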
detector/__pycache__/server.cpython-37.pyc
ADDED
Binary file (4.78 kB)
detector/dataset.py
ADDED
@@ -0,0 +1,86 @@
+import json
+import numpy as np
+from typing import List
+
+import torch
+from torch.utils.data import Dataset
+from tqdm import tqdm
+from transformers import PreTrainedTokenizer
+
+from .download import download
+
+
+def load_texts(data_file, expected_size=None):
+    texts = []
+
+    for line in tqdm(open(data_file), total=expected_size, desc=f'Loading {data_file}'):
+        texts.append(json.loads(line)['text'])
+
+    return texts
+
+
+class Corpus:
+    def __init__(self, name, data_dir='data', skip_train=False):
+        download(name, data_dir=data_dir)
+        self.name = name
+        self.train = load_texts(f'{data_dir}/{name}.train.jsonl', expected_size=250000) if not skip_train else None
+        self.test = load_texts(f'{data_dir}/{name}.test.jsonl', expected_size=5000)
+        self.valid = load_texts(f'{data_dir}/{name}.valid.jsonl', expected_size=5000)
+
+
+class EncodedDataset(Dataset):
+    def __init__(self, real_texts: List[str], fake_texts: List[str], tokenizer: PreTrainedTokenizer,
+                 max_sequence_length: int = None, min_sequence_length: int = None, epoch_size: int = None,
+                 token_dropout: float = None, seed: int = None):
+        self.real_texts = real_texts
+        self.fake_texts = fake_texts
+        self.tokenizer = tokenizer
+        self.max_sequence_length = max_sequence_length
+        self.min_sequence_length = min_sequence_length
+        self.epoch_size = epoch_size
+        self.token_dropout = token_dropout
+        self.random = np.random.RandomState(seed)
+
+    def __len__(self):
+        return self.epoch_size or len(self.real_texts) + len(self.fake_texts)
+
+    def __getitem__(self, index):
+        if self.epoch_size is not None:
+            label = self.random.randint(2)
+            texts = [self.fake_texts, self.real_texts][label]
+            text = texts[self.random.randint(len(texts))]
+        else:
+            if index < len(self.real_texts):
+                text = self.real_texts[index]
+                label = 1
+            else:
+                text = self.fake_texts[index - len(self.real_texts)]
+                label = 0
+
+        tokens = self.tokenizer.encode(text)
+
+        if self.max_sequence_length is None:
+            tokens = tokens[:self.tokenizer.max_len - 2]
+        else:
+            output_length = min(len(tokens), self.max_sequence_length)
+            if self.min_sequence_length:
+                output_length = self.random.randint(min(self.min_sequence_length, len(tokens)), output_length + 1)
+            start_index = 0 if len(tokens) <= output_length else self.random.randint(0, len(tokens) - output_length + 1)
+            end_index = start_index + output_length
+            tokens = tokens[start_index:end_index]
+
+        if self.token_dropout:
+            dropout_mask = self.random.binomial(1, self.token_dropout, len(tokens)).astype(bool)
+            tokens = np.array(tokens)
+            tokens[dropout_mask] = self.tokenizer.unk_token_id
+            tokens = tokens.tolist()
+
+        if self.max_sequence_length is None or len(tokens) == self.max_sequence_length:
+            mask = torch.ones(len(tokens) + 2)
+            return torch.tensor([self.tokenizer.bos_token_id] + tokens + [self.tokenizer.eos_token_id]), mask, label
+
+        padding = [self.tokenizer.pad_token_id] * (self.max_sequence_length - len(tokens))
+        tokens = torch.tensor([self.tokenizer.bos_token_id] + tokens + [self.tokenizer.eos_token_id] + padding)
+        mask = torch.ones(tokens.shape[0])
+        mask[-len(padding):] = 0
+        return tokens, mask, label
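A short sketch of how these pieces compose outside of `train.py`. This assumes the pinned `transformers==2.1.1`, whose tokenizers expose `max_len` (the attribute `EncodedDataset` uses); note that `Corpus` downloads all splits of a dataset on first use:

```python
from transformers import RobertaTokenizer
from detector.dataset import Corpus, EncodedDataset

tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
real = Corpus('webtext', skip_train=True)       # WebText documents = label 1
fake = Corpus('xl-1542M-k40', skip_train=True)  # GPT-2 samples = label 0

# validation-style dataset: full sequences, no random cropping or token dropout
dataset = EncodedDataset(real.valid, fake.valid, tokenizer)
tokens, mask, label = dataset[0]
print(tokens.shape, int(label))
```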
detector/download.py
ADDED
@@ -0,0 +1,49 @@
+import os
+
+import requests
+import torch.distributed as dist
+from tqdm import tqdm
+
+from .utils import distributed
+
+ALL_DATASETS = [
+    'webtext',
+    'small-117M', 'small-117M-k40', 'small-117M-nucleus',
+    'medium-345M', 'medium-345M-k40', 'medium-345M-nucleus',
+    'large-762M', 'large-762M-k40', 'large-762M-nucleus',
+    'xl-1542M', 'xl-1542M-k40', 'xl-1542M-nucleus'
+]
+
+
+def download(*datasets, data_dir='data'):
+    os.makedirs(data_dir, exist_ok=True)
+
+    if distributed() and dist.get_rank() > 0:
+        dist.barrier()
+
+    for ds in datasets:
+        assert ds in ALL_DATASETS, f'Unknown dataset {ds}'
+
+        for split in ['train', 'valid', 'test']:
+            filename = ds + "." + split + '.jsonl'
+            output_file = os.path.join(data_dir, filename)
+            if os.path.isfile(output_file):
+                continue
+
+            r = requests.get("https://storage.googleapis.com/gpt-2/output-dataset/v1/" + filename, stream=True)
+
+            with open(output_file, 'wb') as f:
+                file_size = int(r.headers["content-length"])
+                chunk_size = 1000
+                with tqdm(ncols=100, desc="Fetching " + filename, total=file_size, unit_scale=True) as pbar:
+                    # 1k for chunk_size, since Ethernet packet size is around 1500 bytes
+                    for chunk in r.iter_content(chunk_size=chunk_size):
+                        f.write(chunk)
+                        pbar.update(chunk_size)
+
+    if distributed() and dist.get_rank() == 0:
+        dist.barrier()
+
+
+if __name__ == '__main__':
+    download(*ALL_DATASETS)
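Since `download()` takes dataset names as varargs, it can also be called directly to fetch just the splits an experiment needs rather than the full `ALL_DATASETS` list:

```python
from detector.download import download

# fetch only what the default training configuration uses
download('webtext', 'xl-1542M-k40', data_dir='data')
```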
detector/index.html
ADDED
@@ -0,0 +1,158 @@
+<!doctype html>
+<html>
+<head>
+    <title>GPT-2 Output Detector</title>
+    <meta charset="utf-8">
+    <style type="text/css">
+        * {
+            box-sizing: border-box;
+        }
+
+        body {
+            font-family: sans-serif;
+            margin: 0;
+            background: #ffffff;
+        }
+
+        h1 {
+            font-weight: lighter;
+        }
+
+        a {
+            text-decoration: none;
+            color: #666;
+        }
+
+        a:hover {
+            text-decoration: underline;
+        }
+
+        #container {
+            margin: auto;
+            width: 960px;
+        }
+
+        #textbox {
+            font-family: serif;
+            font-size: 16pt;
+            width: 100%;
+            height: 480px;
+            padding: 20px 30px;
+            line-height: 1.6;
+        }
+
+        .bar-row {
+            height: 30px;
+        }
+        #real-percentage {
+            width: 80px;
+            vertical-align: top;
+        }
+        #bar-container {
+            width: 800px;
+            background-color: #ff7674;
+            line-height: 0.5;
+            position: relative;
+            top: 6px;
+        }
+        #fake-percentage {
+            width: 80px;
+            vertical-align: top;
+        }
+        #bar {
+            display: inline-block;
+            height: 30px;
+            background-color: #83aaff;
+        }
+        em {
+            font-family: monospace;
+            font-style: normal;
+        }
+    </style>
+</head>
+<body>
+<div id="container">
+    <h1>GPT-2 Output Detector Demo</h1>
+    <p>
+        This is an online demo of the
+        <a href="https://github.com/openai/gpt-2-output-dataset/tree/master/detector">GPT-2 output detector</a>
+        model, based on the <a href="https://github.com/huggingface/transformers/commit/1c542df7e554a2014051dd09becf60f157fed524"><code>🤗/Transformers</code></a>
+        implementation of <a href="https://arxiv.org/abs/1907.11692">RoBERTa</a>.
+        Enter some text in the text box; the predicted probabilities will be displayed below.
+        <u>The results start to get reliable after around 50 tokens.</u>
+    </p>
+    <textarea id="textbox" placeholder="Enter text here"></textarea>
+    <div><table cellspacing="0" cellpadding="0">
+        <tr class="bar-row" style="vertical-align: bottom;">
+            <td style="text-align: left;">Real</td>
+            <td id="message" style="text-align: center;"></td>
+            <td style="text-align: right;">Fake</td>
+        </tr>
+        <tr class="bar-row">
+            <td id="real-percentage" style="text-align: left; vertical-align: bottom;"></td>
+            <td id="bar-container"><div id="bar" style="width: 50%;"></div></td>
+            <td id="fake-percentage" style="text-align: right; vertical-align: bottom;"></td>
+        </tr>
+    </table></div>
+</div>
+<script>
+    let textbox = document.getElementById('textbox');
+    let last_submit = null;
+
+    let real_percentage = document.getElementById('real-percentage');
+    let fake_percentage = document.getElementById('fake-percentage');
+    let bar = document.getElementById('bar');
+    let message = document.getElementById('message');
+
+    function update_graph(result) {
+        if (result === null) {
+            real_percentage.innerHTML = '';
+            fake_percentage.innerHTML = '';
+            bar.style.width = '50%';
+            message.innerHTML = '';
+        } else {
+            let percentage = result.real_probability;
+            real_percentage.innerHTML = (100 * percentage).toFixed(2) + '%';
+            fake_percentage.innerHTML = (100 * (1 - percentage)).toFixed(2) + '%';
+            bar.style.width = (100 * percentage).toFixed(2) + '%';
+            if (result.used_tokens === result.all_tokens) {
+                message.innerHTML = `Prediction based on ${result.used_tokens} tokens`;
+            } else {
+                message.innerHTML = `Prediction based on the first ${result.used_tokens} tokens among the total ${result.all_tokens}`;
+            }
+        }
+    }
+
+    textbox.oninput = () => {
+        if (last_submit) {
+            clearTimeout(last_submit);
+        }
+        if (textbox.value.length === 0) {
+            update_graph(null);
+            return;
+        }
+        message.innerText = 'Predicting ...';
+        last_submit = setTimeout(() => {
+            let req = new XMLHttpRequest();
+            if (textbox.value.length === 0) {
+                update_graph(null);
+                return;
+            }
+            req.open('GET', window.location.href + '?' + textbox.value, true);
+            req.onreadystatechange = () => {
+                if (req.readyState !== 4) return;
+                if (req.status !== 200) throw new Error("HTTP status: " + req.status);
+                let result = JSON.parse(req.responseText);
+                update_graph(result);
+            };
+            req.send();
+        }, 1000);
+    };
+
+    window.addEventListener('DOMContentLoaded', () => {
+        textbox.focus();
+    });
+</script>
+</body>
+</html>
detector/server.py
ADDED
@@ -0,0 +1,155 @@
+import os
+import sys
+from http.server import HTTPServer, SimpleHTTPRequestHandler
+from multiprocessing import Process
+import subprocess
+from transformers import RobertaForSequenceClassification, RobertaTokenizer
+import json
+import fire
+import torch
+import re
+from urllib.parse import urlparse, unquote, parse_qs, urlencode
+
+model: RobertaForSequenceClassification = None
+tokenizer: RobertaTokenizer = None
+device: str = None
+
+# Strip the "__theme=..." query parameter (appended by the hosting page) from the query string
+regex = r"__theme=(.+)"
+
+
+def log(*args):
+    print(f"[{os.environ.get('RANK', '')}]", *args, file=sys.stderr)
+
+
+class RequestHandler(SimpleHTTPRequestHandler):
+
+    def do_POST(self):
+        self.begin_content('application/json;charset=UTF-8')
+
+        content_length = int(self.headers['Content-Length'])
+        if content_length > 0:
+            post_data = self.rfile.read(content_length).decode('utf-8')
+            try:
+                post_data = json.loads(post_data)
+
+                if 'text' not in post_data:
+                    self.wfile.write(json.dumps({"error": "missing key 'text'"}).encode('utf-8'))
+                else:
+                    all_tokens, used_tokens, fake, real = self.infer(post_data['text'])
+
+                    self.wfile.write(json.dumps(dict(
+                        all_tokens=all_tokens,
+                        used_tokens=used_tokens,
+                        real_probability=real,
+                        fake_probability=fake
+                    )).encode('utf-8'))
+
+            except Exception as e:
+                self.wfile.write(json.dumps({"error": str(e)}).encode('utf-8'))
+
+    def do_GET(self):
+        query = urlparse(self.path).query
+        query = re.sub(regex, "", query, 0, re.MULTILINE)
+        query = unquote(query)
+
+        if not query:
+            self.begin_content('text/html')
+
+            html = os.path.join(os.path.dirname(__file__), 'index.html')
+            self.wfile.write(open(html).read().encode())
+            return
+
+        self.begin_content('application/json;charset=UTF-8')
+
+        all_tokens, used_tokens, fake, real = self.infer(query)
+
+        self.wfile.write(json.dumps(dict(
+            all_tokens=all_tokens,
+            used_tokens=used_tokens,
+            real_probability=real,
+            fake_probability=fake
+        )).encode())
+
+    def infer(self, query):
+        tokens = tokenizer.encode(query)
+        all_tokens = len(tokens)
+        tokens = tokens[:tokenizer.max_len - 2]
+        used_tokens = len(tokens)
+        tokens = torch.tensor([tokenizer.bos_token_id] + tokens + [tokenizer.eos_token_id]).unsqueeze(0)
+        mask = torch.ones_like(tokens)
+
+        with torch.no_grad():
+            logits = model(tokens.to(device), attention_mask=mask.to(device))[0]
+            probs = logits.softmax(dim=-1)
+
+        fake, real = probs.detach().cpu().flatten().numpy().tolist()
+
+        return all_tokens, used_tokens, fake, real
+
+    def begin_content(self, content_type):
+        self.send_response(200)
+        self.send_header('Content-Type', content_type)
+        self.send_header('Access-Control-Allow-Origin', '*')
+        self.end_headers()
+
+    def log_message(self, format, *args):
+        log(format % args)
+
+
+def serve_forever(server, model, tokenizer, device):
+    log('Process has started; loading the model ...')
+    globals()['model'] = model.to(device)
+    globals()['tokenizer'] = tokenizer
+    globals()['device'] = device
+
+    log(f'Ready to serve at http://localhost:{server.server_address[1]}')
+    server.serve_forever()
+
+
+def main(checkpoint, port=8080, device='cuda' if torch.cuda.is_available() else 'cpu'):
+    if checkpoint.startswith('gs://'):
+        print(f'Downloading {checkpoint}', file=sys.stderr)
+        subprocess.check_output(['gsutil', 'cp', checkpoint, '.'])
+        checkpoint = os.path.basename(checkpoint)
+        assert os.path.isfile(checkpoint)
+
+    print(f'Loading checkpoint from {checkpoint}')
+    data = torch.load(checkpoint, map_location='cpu')
+
+    model_name = 'roberta-large' if data['args']['large'] else 'roberta-base'
+    model = RobertaForSequenceClassification.from_pretrained(model_name)
+    tokenizer = RobertaTokenizer.from_pretrained(model_name)
+
+    model.load_state_dict(data['model_state_dict'])
+    model.eval()
+
+    print(f'Starting HTTP server on port {port}', file=sys.stderr)
+    server = HTTPServer(('0.0.0.0', port), RequestHandler)
+
+    # avoid calling CUDA API before forking; doing so in a subprocess is fine.
+    num_workers = int(subprocess.check_output([sys.executable, '-c', 'import torch; print(torch.cuda.device_count())']))
+
+    if num_workers <= 1:
+        serve_forever(server, model, tokenizer, device)
+    else:
+        print(f'Launching {num_workers} worker processes...')
+
+        subprocesses = []
+
+        for i in range(num_workers):
+            os.environ['RANK'] = f'{i}'
+            os.environ['CUDA_VISIBLE_DEVICES'] = f'{i}'
+            process = Process(target=serve_forever, args=(server, model, tokenizer, device))
+            process.start()
+            subprocesses.append(process)
+
+        del os.environ['RANK']
+        del os.environ['CUDA_VISIBLE_DEVICES']
+
+        for process in subprocesses:
+            process.join()
+
+
+if __name__ == '__main__':
+    fire.Fire(main)
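Besides the GET route used by the web UI, `do_POST` accepts a JSON body with a `text` key (likely the endpoint added by `post_endpoint.patch`). A sketch of calling it; the port is 7860 when launched via the Dockerfile's `CMD`, 8080 with the script's default:

```python
import requests

resp = requests.post('http://localhost:7860', json={'text': 'Some passage to score.'})
print(resp.json())  # all_tokens, used_tokens, real_probability, fake_probability
```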
detector/train.py
ADDED
@@ -0,0 +1,305 @@
+"""Training code for the detector model"""
+
+import argparse
+import os
+import subprocess
+import sys
+from itertools import count
+from multiprocessing import Process
+
+import torch
+import torch.distributed as dist
+from torch import nn
+from torch.nn.parallel import DistributedDataParallel
+from torch.optim import Adam
+from torch.utils.data import DataLoader, DistributedSampler, RandomSampler
+from tqdm import tqdm
+from transformers import *
+
+from .dataset import Corpus, EncodedDataset
+from .download import download
+from .utils import summary, distributed
+
+
+def setup_distributed(port=29500):
+    if not dist.is_available() or not torch.cuda.is_available() or torch.cuda.device_count() <= 1:
+        return 0, 1
+
+    if 'MPIR_CVAR_CH3_INTERFACE_HOSTNAME' in os.environ:
+        from mpi4py import MPI
+        mpi_rank = MPI.COMM_WORLD.Get_rank()
+        mpi_size = MPI.COMM_WORLD.Get_size()
+
+        os.environ["MASTER_ADDR"] = '127.0.0.1'
+        os.environ["MASTER_PORT"] = str(port)
+
+        dist.init_process_group(backend="nccl", world_size=mpi_size, rank=mpi_rank)
+        return mpi_rank, mpi_size
+
+    dist.init_process_group(backend="nccl", init_method="env://")
+    return dist.get_rank(), dist.get_world_size()
+
+
+def load_datasets(data_dir, real_dataset, fake_dataset, tokenizer, batch_size,
+                  max_sequence_length, random_sequence_length, epoch_size=None, token_dropout=None, seed=None):
+    if fake_dataset == 'TWO':
+        download(real_dataset, 'xl-1542M', 'xl-1542M-nucleus', data_dir=data_dir)
+    elif fake_dataset == 'THREE':
+        download(real_dataset, 'xl-1542M', 'xl-1542M-k40', 'xl-1542M-nucleus', data_dir=data_dir)
+    else:
+        download(real_dataset, fake_dataset, data_dir=data_dir)
+
+    real_corpus = Corpus(real_dataset, data_dir=data_dir)
+
+    if fake_dataset == "TWO":
+        real_train, real_valid = real_corpus.train * 2, real_corpus.valid * 2
+        fake_corpora = [Corpus(name, data_dir=data_dir) for name in ['xl-1542M', 'xl-1542M-nucleus']]
+        fake_train = sum([corpus.train for corpus in fake_corpora], [])
+        fake_valid = sum([corpus.valid for corpus in fake_corpora], [])
+    elif fake_dataset == "THREE":
+        real_train, real_valid = real_corpus.train * 3, real_corpus.valid * 3
+        fake_corpora = [Corpus(name, data_dir=data_dir) for name in
+                        ['xl-1542M', 'xl-1542M-k40', 'xl-1542M-nucleus']]
+        fake_train = sum([corpus.train for corpus in fake_corpora], [])
+        fake_valid = sum([corpus.valid for corpus in fake_corpora], [])
+    else:
+        fake_corpus = Corpus(fake_dataset, data_dir=data_dir)
+
+        real_train, real_valid = real_corpus.train, real_corpus.valid
+        fake_train, fake_valid = fake_corpus.train, fake_corpus.valid
+
+    Sampler = DistributedSampler if distributed() and dist.get_world_size() > 1 else RandomSampler
+
+    min_sequence_length = 10 if random_sequence_length else None
+    train_dataset = EncodedDataset(real_train, fake_train, tokenizer, max_sequence_length, min_sequence_length,
+                                   epoch_size, token_dropout, seed)
+    train_loader = DataLoader(train_dataset, batch_size, sampler=Sampler(train_dataset), num_workers=0)
+
+    validation_dataset = EncodedDataset(real_valid, fake_valid, tokenizer)
+    validation_loader = DataLoader(validation_dataset, batch_size=1, sampler=Sampler(validation_dataset))
+
+    return train_loader, validation_loader
+
+
+def accuracy_sum(logits, labels):
+    if list(logits.shape) == list(labels.shape) + [2]:
+        # 2-d outputs
+        classification = (logits[..., 0] < logits[..., 1]).long().flatten()
+    else:
+        classification = (logits > 0).long().flatten()
+    assert classification.shape == labels.shape
+    return (classification == labels).float().sum().item()
+
+
+def train(model: nn.Module, optimizer, device: str, loader: DataLoader, desc='Train'):
+    model.train()
+
+    train_accuracy = 0
+    train_epoch_size = 0
+    train_loss = 0
+
+    with tqdm(loader, desc=desc, disable=distributed() and dist.get_rank() > 0) as loop:
+        for texts, masks, labels in loop:
+
+            texts, masks, labels = texts.to(device), masks.to(device), labels.to(device)
+            batch_size = texts.shape[0]
+
+            optimizer.zero_grad()
+            loss, logits = model(texts, attention_mask=masks, labels=labels)
+            loss.backward()
+            optimizer.step()
+
+            batch_accuracy = accuracy_sum(logits, labels)
+            train_accuracy += batch_accuracy
+            train_epoch_size += batch_size
+            train_loss += loss.item() * batch_size
+
+            loop.set_postfix(loss=loss.item(), acc=train_accuracy / train_epoch_size)
+
+    return {
+        "train/accuracy": train_accuracy,
+        "train/epoch_size": train_epoch_size,
+        "train/loss": train_loss
+    }
+
+
+def validate(model: nn.Module, device: str, loader: DataLoader, votes=1, desc='Validation'):
+    model.eval()
+
+    validation_accuracy = 0
+    validation_epoch_size = 0
+    validation_loss = 0
+
+    records = [record for v in range(votes) for record in tqdm(loader, desc=f'Preloading data ... {v}',
+                                                               disable=distributed() and dist.get_rank() > 0)]
+    records = [[records[v * len(loader) + i] for v in range(votes)] for i in range(len(loader))]
+
+    with tqdm(records, desc=desc, disable=distributed() and dist.get_rank() > 0) as loop, torch.no_grad():
+        for example in loop:
+            losses = []
+            logit_votes = []
+
+            for texts, masks, labels in example:
+                texts, masks, labels = texts.to(device), masks.to(device), labels.to(device)
+                batch_size = texts.shape[0]
+
+                loss, logits = model(texts, attention_mask=masks, labels=labels)
+                losses.append(loss)
+                logit_votes.append(logits)
+
+            loss = torch.stack(losses).mean(dim=0)
+            logits = torch.stack(logit_votes).mean(dim=0)
+
+            batch_accuracy = accuracy_sum(logits, labels)
+            validation_accuracy += batch_accuracy
+            validation_epoch_size += batch_size
+            validation_loss += loss.item() * batch_size
+
+            loop.set_postfix(loss=loss.item(), acc=validation_accuracy / validation_epoch_size)
+
+    return {
+        "validation/accuracy": validation_accuracy,
+        "validation/epoch_size": validation_epoch_size,
+        "validation/loss": validation_loss
+    }
+
+
+def _all_reduce_dict(d, device):
+    # wrap in tensor and use reduce to gpu0 tensor
+    output_d = {}
+    for (key, value) in sorted(d.items()):
+        tensor_input = torch.tensor([[value]]).to(device)
+        torch.distributed.all_reduce(tensor_input)
+        output_d[key] = tensor_input.item()
+    return output_d
+
+
+def run(max_epochs=None,
+        device=None,
+        batch_size=24,
+        max_sequence_length=128,
+        random_sequence_length=False,
+        epoch_size=None,
+        seed=None,
+        data_dir='data',
+        real_dataset='webtext',
+        fake_dataset='xl-1542M-nucleus',
+        token_dropout=None,
+        large=False,
+        learning_rate=2e-5,
+        weight_decay=0,
+        **kwargs):
+    args = locals()
+    rank, world_size = setup_distributed()
+
+    if device is None:
+        device = f'cuda:{rank}' if torch.cuda.is_available() else 'cpu'
+
+    print('rank:', rank, 'world_size:', world_size, 'device:', device)
+
+    if distributed() and rank > 0:
+        dist.barrier()
+
+    model_name = 'roberta-large' if large else 'roberta-base'
+    tokenization_utils.logger.setLevel('ERROR')
+    tokenizer = RobertaTokenizer.from_pretrained(model_name)
+    model = RobertaForSequenceClassification.from_pretrained(model_name).to(device)
+
+    if rank == 0:
+        summary(model)
+        if distributed():
+            dist.barrier()
+
+    if world_size > 1:
+        model = DistributedDataParallel(model, [rank], output_device=rank, find_unused_parameters=True)
+
+    train_loader, validation_loader = load_datasets(data_dir, real_dataset, fake_dataset, tokenizer, batch_size,
+                                                    max_sequence_length, random_sequence_length, epoch_size,
+                                                    token_dropout, seed)
+
+    optimizer = Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)
+    epoch_loop = count(1) if max_epochs is None else range(1, max_epochs + 1)
+
+    logdir = os.environ.get("OPENAI_LOGDIR", "logs")
+    os.makedirs(logdir, exist_ok=True)
+
+    from torch.utils.tensorboard import SummaryWriter
+    writer = SummaryWriter(logdir) if rank == 0 else None
+    best_validation_accuracy = 0
+
+    for epoch in epoch_loop:
+        if world_size > 1:
+            train_loader.sampler.set_epoch(epoch)
+            validation_loader.sampler.set_epoch(epoch)
+
+        train_metrics = train(model, optimizer, device, train_loader, f'Epoch {epoch}')
+        validation_metrics = validate(model, device, validation_loader)
+
+        combined_metrics = _all_reduce_dict({**validation_metrics, **train_metrics}, device)
+
+        combined_metrics["train/accuracy"] /= combined_metrics["train/epoch_size"]
+        combined_metrics["train/loss"] /= combined_metrics["train/epoch_size"]
+        combined_metrics["validation/accuracy"] /= combined_metrics["validation/epoch_size"]
+        combined_metrics["validation/loss"] /= combined_metrics["validation/epoch_size"]
+
+        if rank == 0:
+            for key, value in combined_metrics.items():
+                writer.add_scalar(key, value, global_step=epoch)
+
+            if combined_metrics["validation/accuracy"] > best_validation_accuracy:
+                best_validation_accuracy = combined_metrics["validation/accuracy"]
+
+                model_to_save = model.module if hasattr(model, 'module') else model
+                torch.save(dict(
+                    epoch=epoch,
+                    model_state_dict=model_to_save.state_dict(),
+                    optimizer_state_dict=optimizer.state_dict(),
+                    args=args
+                ), os.path.join(logdir, "best-model.pt"))
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+
+    parser.add_argument('--max-epochs', type=int, default=None)
+    parser.add_argument('--device', type=str, default=None)
+    parser.add_argument('--batch-size', type=int, default=24)
+    parser.add_argument('--max-sequence-length', type=int, default=128)
+    parser.add_argument('--random-sequence-length', action='store_true')
+    parser.add_argument('--epoch-size', type=int, default=None)
+    parser.add_argument('--seed', type=int, default=None)
+    parser.add_argument('--data-dir', type=str, default='data')
+    parser.add_argument('--real-dataset', type=str, default='webtext')
+    parser.add_argument('--fake-dataset', type=str, default='xl-1542M-k40')
+    parser.add_argument('--token-dropout', type=float, default=None)
+
+    parser.add_argument('--large', action='store_true', help='use the roberta-large model instead of roberta-base')
+    parser.add_argument('--learning-rate', type=float, default=2e-5)
+    parser.add_argument('--weight-decay', type=float, default=0)
+    args = parser.parse_args()
+
+    nproc = int(subprocess.check_output([sys.executable, '-c', "import torch;"
+                                         "print(torch.cuda.device_count() if torch.cuda.is_available() else 1)"]))
+    if nproc > 1:
+        print(f'Launching {nproc} processes ...', file=sys.stderr)
+
+        os.environ["MASTER_ADDR"] = '127.0.0.1'
+        os.environ["MASTER_PORT"] = str(29500)
+        os.environ['WORLD_SIZE'] = str(nproc)
+        os.environ['OMP_NUM_THREADS'] = str(1)
+        subprocesses = []
+
+        for i in range(nproc):
+            os.environ['RANK'] = str(i)
+            os.environ['LOCAL_RANK'] = str(i)
+            process = Process(target=run, kwargs=vars(args))
+            process.start()
+            subprocesses.append(process)
+
+        for process in subprocesses:
+            process.join()
+    else:
+        run(**vars(args))
detector/utils.py
ADDED
@@ -0,0 +1,62 @@
+import sys
+from functools import reduce
+
+from torch import nn
+import torch.distributed as dist
+
+
+def summary(model: nn.Module, file=sys.stdout):
+    def repr(model):
+        # We treat the extra repr like the sub-module, one item per line
+        extra_lines = []
+        extra_repr = model.extra_repr()
+        # empty string will be split into list ['']
+        if extra_repr:
+            extra_lines = extra_repr.split('\n')
+        child_lines = []
+        total_params = 0
+        for key, module in model._modules.items():
+            mod_str, num_params = repr(module)
+            mod_str = nn.modules.module._addindent(mod_str, 2)
+            child_lines.append('(' + key + '): ' + mod_str)
+            total_params += num_params
+        lines = extra_lines + child_lines
+
+        for name, p in model._parameters.items():
+            if hasattr(p, 'shape'):
+                total_params += reduce(lambda x, y: x * y, p.shape)
+
+        main_str = model._get_name() + '('
+        if lines:
+            # simple one-liner info, which most builtin Modules will use
+            if len(extra_lines) == 1 and not child_lines:
+                main_str += extra_lines[0]
+            else:
+                main_str += '\n  ' + '\n  '.join(lines) + '\n'
+
+        main_str += ')'
+        if file is sys.stdout:
+            main_str += ', \033[92m{:,}\033[0m params'.format(total_params)
+        else:
+            main_str += ', {:,} params'.format(total_params)
+        return main_str, total_params
+
+    string, count = repr(model)
+    if file is not None:
+        if isinstance(file, str):
+            file = open(file, 'w')
+        print(string, file=file)
+        file.flush()
+
+    return count
+
+
+def grad_norm(model: nn.Module):
+    total_norm = 0
+    for p in model.parameters():
+        param_norm = p.grad.data.norm(2)
+        total_norm += param_norm.item() ** 2
+    return total_norm ** 0.5
+
+
+def distributed():
+    return dist.is_available() and dist.is_initialized()
download_dataset.py
ADDED
@@ -0,0 +1,29 @@
+import os
+import sys
+import requests
+from tqdm import tqdm
+
+subdir = 'data'
+if not os.path.exists(subdir):
+    os.makedirs(subdir)
+subdir = subdir.replace('\\', '/')  # needed for Windows
+
+for ds in [
+    'webtext',
+    'small-117M', 'small-117M-k40',
+    'medium-345M', 'medium-345M-k40',
+    'large-762M', 'large-762M-k40',
+    'xl-1542M', 'xl-1542M-k40',
+]:
+    for split in ['train', 'valid', 'test']:
+        filename = ds + "." + split + '.jsonl'
+        r = requests.get("https://storage.googleapis.com/gpt-2/output-dataset/v1/" + filename, stream=True)
+
+        with open(os.path.join(subdir, filename), 'wb') as f:
+            file_size = int(r.headers["content-length"])
+            chunk_size = 1000
+            with tqdm(ncols=100, desc="Fetching " + filename, total=file_size, unit_scale=True) as pbar:
+                # 1k for chunk_size, since Ethernet packet size is around 1500 bytes
+                for chunk in r.iter_content(chunk_size=chunk_size):
+                    f.write(chunk)
+                    pbar.update(chunk_size)
images/detection_by_length.png
ADDED
images/parts_of_speech.png
ADDED
images/self_detection_k40.png
ADDED
images/self_detection_t1.png
ADDED
post_endpoint.patch
ADDED
Binary file (4.02 kB)
requirements.txt
ADDED
@@ -0,0 +1,37 @@
+absl-py==0.8.1
+boto3==1.10.11
+botocore==1.13.11
+cachetools==3.1.1
+certifi==2019.9.11
+chardet==3.0.4
+Click==7.0
+docutils==0.15.2
+fire==0.2.1
+google-auth==1.7.0
+google-auth-oauthlib==0.4.1
+grpcio==1.25.0
+idna==2.8
+jmespath==0.9.4
+joblib==0.14.0
+Markdown==3.1.1
+numpy==1.17.3
+oauthlib==3.1.0
+protobuf==3.10.0
+pyasn1==0.4.7
+pyasn1-modules==0.2.7
+python-dateutil==2.8.0
+regex==2019.11.1
+requests==2.22.0
+requests-oauthlib==1.3.0
+rsa==4.0
+s3transfer==0.2.1
+sacremoses==0.0.35
+sentencepiece==0.1.83
+six==1.13.0
+tensorboard==2.0.1
+termcolor==1.1.0
+torch==1.3.0
+tqdm==4.37.0
+transformers==2.1.1
+urllib3==1.25.6
+Werkzeug==0.16.0