applied-ai-018 committed
Commit bf1eb83 · verified · 1 parent: 798273c

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes. See raw diff.
Files changed (50)
  1. ckpts/universal/global_step20/zero/18.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  2. lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/config.yaml +43 -0
  3. lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/output.log +34 -0
  4. lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/requirements.txt +155 -0
  5. lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/wandb-metadata.json +850 -0
  6. lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/wandb-summary.json +1 -0
  7. lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/logs/debug-internal.log +183 -0
  8. lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/logs/debug.log +29 -0
  9. lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/run-zatipfuv.wandb +0 -0
  10. lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/logs/debug-internal.log +182 -0
  11. lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/logs/debug.log +29 -0
  12. lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/run-cegnkp0e.wandb +0 -0
  13. lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/config.yaml +33 -0
  14. lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/output.log +5 -0
  15. lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/requirements.txt +153 -0
  16. lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/wandb-metadata.json +850 -0
  17. lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/logs/debug-internal.log +49 -0
  18. lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/logs/debug.log +28 -0
  19. lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/run-wtsee6ig.wandb +0 -0
  20. venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py34_np19.gz +3 -0
  21. venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__init__.py +65 -0
  22. venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/__init__.cpython-310.pyc +0 -0
  23. venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/configuration_audio_spectrogram_transformer.cpython-310.pyc +0 -0
  24. venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/convert_audio_spectrogram_transformer_original_to_pytorch.cpython-310.pyc +0 -0
  25. venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/feature_extraction_audio_spectrogram_transformer.cpython-310.pyc +0 -0
  26. venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/modeling_audio_spectrogram_transformer.cpython-310.pyc +0 -0
  27. venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py +124 -0
  28. venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/convert_audio_spectrogram_transformer_original_to_pytorch.py +279 -0
  29. venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py +236 -0
  30. venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py +613 -0
  31. venv/lib/python3.10/site-packages/transformers/models/clip/__init__.py +183 -0
  32. venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/__init__.cpython-310.pyc +0 -0
  33. venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/configuration_clip.cpython-310.pyc +0 -0
  34. venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/convert_clip_original_pytorch_to_hf.cpython-310.pyc +0 -0
  35. venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/feature_extraction_clip.cpython-310.pyc +0 -0
  36. venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/image_processing_clip.cpython-310.pyc +0 -0
  37. venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_clip.cpython-310.pyc +0 -0
  38. venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_flax_clip.cpython-310.pyc +0 -0
  39. venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_tf_clip.cpython-310.pyc +0 -0
  40. venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/processing_clip.cpython-310.pyc +0 -0
  41. venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/tokenization_clip_fast.cpython-310.pyc +0 -0
  42. venv/lib/python3.10/site-packages/transformers/models/clip/configuration_clip.py +456 -0
  43. venv/lib/python3.10/site-packages/transformers/models/clip/convert_clip_original_pytorch_to_hf.py +148 -0
  44. venv/lib/python3.10/site-packages/transformers/models/clip/feature_extraction_clip.py +33 -0
  45. venv/lib/python3.10/site-packages/transformers/models/clip/image_processing_clip.py +346 -0
  46. venv/lib/python3.10/site-packages/transformers/models/clip/modeling_clip.py +1416 -0
  47. venv/lib/python3.10/site-packages/transformers/models/clip/modeling_flax_clip.py +1295 -0
  48. venv/lib/python3.10/site-packages/transformers/models/clip/modeling_tf_clip.py +1461 -0
  49. venv/lib/python3.10/site-packages/transformers/models/clip/processing_clip.py +153 -0
  50. venv/lib/python3.10/site-packages/transformers/models/clip/tokenization_clip.py +516 -0
ckpts/universal/global_step20/zero/18.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:88b4e64ca27c88184597a7e9749498dc005de5e5037ce4d97f04fdb8b4548ac4
+ size 33555627
lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/config.yaml ADDED
@@ -0,0 +1,43 @@
+ wandb_version: 1
+
+ _wandb:
+ desc: null
+ value:
+ python_version: 3.10.12
+ cli_version: 0.17.0
+ framework: huggingface
+ huggingface_version: 4.41.1
+ is_jupyter_run: false
+ is_kaggle_kernel: false
+ start_time: 1716439886
+ t:
+ 1:
+ - 1
+ - 5
+ - 11
+ - 49
+ - 51
+ - 53
+ - 55
+ - 71
+ - 98
+ - 100
+ 2:
+ - 1
+ - 5
+ - 11
+ - 49
+ - 51
+ - 53
+ - 55
+ - 71
+ - 98
+ - 100
+ 3:
+ - 23
+ 4: 3.10.12
+ 5: 0.17.0
+ 6: 4.41.1
+ 8:
+ - 5
+ 13: linux-x86_64
lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/output.log ADDED
@@ -0,0 +1,34 @@
+
+ 2024-05-23:04:51:27,146 INFO [__main__.py:251] Verbosity set to INFO
+ 2024-05-23:04:51:36,123 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'hellaswag', 'mrpc', 'openbookqa', 'sst2', 'winogrande']
+ 2024-05-23:04:51:36,124 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
+ 2024-05-23:04:51:36,125 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100'}
+ 2024-05-23:04:51:38,444 INFO [huggingface.py:164] Using device 'cuda'
+ Traceback (most recent call last):
+ File "/usr/lib/python3.10/runpy.py", line 196, in _run_module_as_main
+ return _run_code(code, main_globals, None,
+ File "/usr/lib/python3.10/runpy.py", line 86, in _run_code
+ exec(code, run_globals)
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 417, in <module>
+ cli_evaluate()
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/__main__.py", line 341, in cli_evaluate
+ results = evaluator.simple_evaluate(
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/utils.py", line 288, in _wrapper
+ return fn(*args, **kwargs)
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/evaluator.py", line 180, in simple_evaluate
+ lm = lm_eval.api.registry.get_model(model).create_from_arg_string(
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/api/model.py", line 134, in create_from_arg_string
+ return cls(**args, **args2)
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 190, in __init__
+ self._get_config(
+ File "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/lm_eval/models/huggingface.py", line 471, in _get_config
+ self._config = transformers.AutoConfig.from_pretrained(
+ File "/usr/local/lib/python3.10/dist-packages/transformers/models/auto/configuration_auto.py", line 934, in from_pretrained
+ config_dict, unused_kwargs = PretrainedConfig.get_config_dict(pretrained_model_name_or_path, **kwargs)
+ File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 632, in get_config_dict
+ config_dict, kwargs = cls._get_config_dict(pretrained_model_name_or_path, **kwargs)
+ File "/usr/local/lib/python3.10/dist-packages/transformers/configuration_utils.py", line 689, in _get_config_dict
+ resolved_config_file = cached_file(
+ File "/usr/local/lib/python3.10/dist-packages/transformers/utils/hub.py", line 370, in cached_file
+ raise EnvironmentError(
+ OSError: /mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100 does not appear to have a file named config.json. Checkout 'https://huggingface.co//mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100/tree/main' for available files.
lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/requirements.txt ADDED
@@ -0,0 +1,155 @@
+ DataProperty==1.0.1
+ GitPython==3.1.43
+ Jinja2==3.1.4
+ Markdown==3.6
+ MarkupSafe==2.1.5
+ Pillow-SIMD==7.0.0.post3
+ PyYAML==6.0
+ Werkzeug==3.0.3
+ absl-py==2.1.0
+ accelerate==0.30.1
+ aiohttp==3.9.5
+ aiosignal==1.3.1
+ async-timeout==4.0.3
+ attrs==23.2.0
+ av==9.2.0
+ cachetools==5.3.3
+ certifi==2024.2.2
+ cffi==1.15.1
+ cfgv==3.4.0
+ chardet==5.2.0
+ charset-normalizer==3.3.2
+ click==8.1.7
+ cmake==3.29.2
+ colorama==0.4.6
+ datasets==2.19.1
+ deepspeed==0.12.4+hpu.synapse.v1.15.1
+ dill==0.3.8
+ distlib==0.3.8
+ docker-pycreds==0.4.0
+ einops==0.8.0
+ evaluate==0.4.2
+ exceptiongroup==1.2.1
+ expecttest==0.2.1
+ filelock==3.14.0
+ frozenlist==1.4.1
+ fsspec==2024.3.1
+ gitdb==4.0.11
+ google-auth-oauthlib==0.4.6
+ google-auth==2.29.0
+ grpcio==1.63.0
+ habana-media-loader==1.15.1.15
+ habana-pyhlml==1.15.1.15
+ habana-torch-dataloader==1.15.1.15
+ habana-torch-plugin==1.15.1.15
+ habana_gpu_migration==1.15.1.15
+ habana_quantization_toolkit==1.15.1.15
+ hjson==3.1.0
+ huggingface-hub==0.23.1
+ identify==2.5.36
+ idna==3.7
+ iniconfig==2.0.0
+ joblib==1.4.2
+ jsonlines==4.0.0
+ lightning-habana==1.4.0
+ lightning-utilities==0.11.2
+ lightning==2.2.0.post0
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lm_eval==0.4.2
+ lxml==5.2.2
+ mbstrdecoder==1.1.3
+ more-itertools==10.2.0
+ mpi4py==3.1.4
+ mpmath==1.3.0
+ multidict==6.0.5
+ multiprocess==0.70.16
+ networkx==3.3
+ ninja==1.11.1.1
+ nltk==3.8.1
+ nodeenv==1.8.0
+ numexpr==2.10.0
+ numpy==1.23.5
+ oauthlib==3.2.2
+ packaging==24.0
+ pandas==2.0.1
+ pathspec==0.12.1
+ pathvalidate==3.2.0
+ peft==0.11.1
+ perfetto==0.7.0
+ pillow==10.3.0
+ pip==22.0.2
+ pip==23.3.1
+ platformdirs==4.2.1
+ pluggy==1.5.0
+ portalocker==2.8.2
+ pre-commit==3.3.3
+ pretty-errors==1.2.25
+ protobuf==3.20.3
+ psutil==5.9.8
+ py-cpuinfo==9.0.0
+ pyarrow-hotfix==0.6
+ pyarrow==16.1.0
+ pyasn1==0.6.0
+ pyasn1_modules==0.4.0
+ pybind11==2.10.4
+ pycparser==2.22
+ pydantic==1.10.13
+ pynvml==8.0.4
+ pytablewriter==1.2.0
+ pytest==8.2.0
+ python-dateutil==2.9.0.post0
+ pytorch-lightning==2.2.4
+ pytz==2024.1
+ regex==2023.5.5
+ requests-oauthlib==2.0.0
+ requests==2.31.0
+ rouge_score==0.1.2
+ rsa==4.9
+ sacrebleu==2.4.2
+ safetensors==0.4.3
+ scikit-learn==1.5.0
+ scipy==1.13.1
+ sentencepiece==0.2.0
+ sentry-sdk==2.2.1
+ setproctitle==1.3.3
+ setuptools==59.6.0
+ setuptools==69.5.1
+ six==1.16.0
+ smmap==5.0.1
+ sqlitedict==2.1.0
+ symengine==0.11.0
+ sympy==1.12
+ tabledata==1.3.3
+ tabulate==0.9.0
+ tcolorpy==0.1.6
+ tdqm==0.0.1
+ tensorboard-data-server==0.6.1
+ tensorboard-plugin-wit==1.8.1
+ tensorboard==2.11.2
+ threadpoolctl==3.5.0
+ tokenizers==0.19.1
+ tomli==2.0.1
+ torch==2.2.0a0+git8964477
+ torch_tb_profiler==0.4.0
+ torchaudio==2.2.0+08901ad
+ torchdata==0.7.1+5e6f7b7
+ torchmetrics==1.4.0
+ torchtext==0.17.0+400da5c
+ torchvision==0.17.0+b2383d4
+ tqdm-multiprocess==0.0.11
+ tqdm==4.66.4
+ transformers==4.41.1
+ typepy==1.3.2
+ typing_extensions==4.11.0
+ tzdata==2024.1
+ urllib3==1.26.18
+ virtualenv==20.26.1
+ wandb==0.17.0
+ wheel==0.37.1
+ wheel==0.43.0
+ word2number==1.1
+ xxhash==3.4.1
+ yamllint==1.35.1
+ yarl==1.9.4
+ zstandard==0.22.0
lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/wandb-metadata.json ADDED
@@ -0,0 +1,850 @@
1
+ {
2
+ "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
3
+ "python": "3.10.12",
4
+ "heartbeatAt": "2024-05-23T04:51:26.939641",
5
+ "startedAt": "2024-05-23T04:51:26.427629",
6
+ "docker": null,
7
+ "cuda": null,
8
+ "args": [
9
+ "--model",
10
+ "hf",
11
+ "--model_args",
12
+ "pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100",
13
+ "--tasks",
14
+ "hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc",
15
+ "--batch_size",
16
+ "auto",
17
+ "--wandb_args",
18
+ "project=bharatgpt,group=trial_expt_2"
19
+ ],
20
+ "state": "running",
21
+ "program": "-m lm_eval.__main__",
22
+ "codePathLocal": null,
23
+ "git": {
24
+ "remote": "https://github.com/EleutherAI/lm-evaluation-harness",
25
+ "commit": null
26
+ },
27
+ "email": null,
28
+ "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness",
29
+ "host": "peacock-evaluation-debug-worker-0",
30
+ "username": "root",
31
+ "executable": "/usr/bin/python3",
32
+ "cpu_count": 80,
33
+ "cpu_count_logical": 160,
34
+ "cpu_freq": {
35
+ "current": 2334.34245625,
36
+ "min": 800.0,
37
+ "max": 3400.0
38
+ },
39
+ "cpu_freq_per_core": [
40
+ {
41
+ "current": 3399.997,
42
+ "min": 800.0,
43
+ "max": 3400.0
44
+ },
45
+ {
46
+ "current": 3400.0,
47
+ "min": 800.0,
48
+ "max": 3400.0
49
+ },
50
+ {
51
+ "current": 2300.0,
52
+ "min": 800.0,
53
+ "max": 3400.0
54
+ },
55
+ {
56
+ "current": 2300.0,
57
+ "min": 800.0,
58
+ "max": 3400.0
59
+ },
60
+ {
61
+ "current": 2300.0,
62
+ "min": 800.0,
63
+ "max": 3400.0
64
+ },
65
+ {
66
+ "current": 2300.0,
67
+ "min": 800.0,
68
+ "max": 3400.0
69
+ },
70
+ {
71
+ "current": 2300.0,
72
+ "min": 800.0,
73
+ "max": 3400.0
74
+ },
75
+ {
76
+ "current": 2300.0,
77
+ "min": 800.0,
78
+ "max": 3400.0
79
+ },
80
+ {
81
+ "current": 2300.0,
82
+ "min": 800.0,
83
+ "max": 3400.0
84
+ },
85
+ {
86
+ "current": 2300.0,
87
+ "min": 800.0,
88
+ "max": 3400.0
89
+ },
90
+ {
91
+ "current": 2300.0,
92
+ "min": 800.0,
93
+ "max": 3400.0
94
+ },
95
+ {
96
+ "current": 2300.0,
97
+ "min": 800.0,
98
+ "max": 3400.0
99
+ },
100
+ {
101
+ "current": 2300.0,
102
+ "min": 800.0,
103
+ "max": 3400.0
104
+ },
105
+ {
106
+ "current": 2300.0,
107
+ "min": 800.0,
108
+ "max": 3400.0
109
+ },
110
+ {
111
+ "current": 2300.0,
112
+ "min": 800.0,
113
+ "max": 3400.0
114
+ },
115
+ {
116
+ "current": 2300.0,
117
+ "min": 800.0,
118
+ "max": 3400.0
119
+ },
120
+ {
121
+ "current": 2300.0,
122
+ "min": 800.0,
123
+ "max": 3400.0
124
+ },
125
+ {
126
+ "current": 2300.0,
127
+ "min": 800.0,
128
+ "max": 3400.0
129
+ },
130
+ {
131
+ "current": 2300.0,
132
+ "min": 800.0,
133
+ "max": 3400.0
134
+ },
135
+ {
136
+ "current": 2300.0,
137
+ "min": 800.0,
138
+ "max": 3400.0
139
+ },
140
+ {
141
+ "current": 2300.0,
142
+ "min": 800.0,
143
+ "max": 3400.0
144
+ },
145
+ {
146
+ "current": 2300.0,
147
+ "min": 800.0,
148
+ "max": 3400.0
149
+ },
150
+ {
151
+ "current": 2300.0,
152
+ "min": 800.0,
153
+ "max": 3400.0
154
+ },
155
+ {
156
+ "current": 2300.0,
157
+ "min": 800.0,
158
+ "max": 3400.0
159
+ },
160
+ {
161
+ "current": 2300.0,
162
+ "min": 800.0,
163
+ "max": 3400.0
164
+ },
165
+ {
166
+ "current": 2300.0,
167
+ "min": 800.0,
168
+ "max": 3400.0
169
+ },
170
+ {
171
+ "current": 2300.0,
172
+ "min": 800.0,
173
+ "max": 3400.0
174
+ },
175
+ {
176
+ "current": 2300.0,
177
+ "min": 800.0,
178
+ "max": 3400.0
179
+ },
180
+ {
181
+ "current": 2300.0,
182
+ "min": 800.0,
183
+ "max": 3400.0
184
+ },
185
+ {
186
+ "current": 2300.0,
187
+ "min": 800.0,
188
+ "max": 3400.0
189
+ },
190
+ {
191
+ "current": 2300.0,
192
+ "min": 800.0,
193
+ "max": 3400.0
194
+ },
195
+ {
196
+ "current": 2300.0,
197
+ "min": 800.0,
198
+ "max": 3400.0
199
+ },
200
+ {
201
+ "current": 2300.0,
202
+ "min": 800.0,
203
+ "max": 3400.0
204
+ },
205
+ {
206
+ "current": 2300.0,
207
+ "min": 800.0,
208
+ "max": 3400.0
209
+ },
210
+ {
211
+ "current": 2300.0,
212
+ "min": 800.0,
213
+ "max": 3400.0
214
+ },
215
+ {
216
+ "current": 2300.0,
217
+ "min": 800.0,
218
+ "max": 3400.0
219
+ },
220
+ {
221
+ "current": 2300.0,
222
+ "min": 800.0,
223
+ "max": 3400.0
224
+ },
225
+ {
226
+ "current": 2300.0,
227
+ "min": 800.0,
228
+ "max": 3400.0
229
+ },
230
+ {
231
+ "current": 2300.0,
232
+ "min": 800.0,
233
+ "max": 3400.0
234
+ },
235
+ {
236
+ "current": 2300.0,
237
+ "min": 800.0,
238
+ "max": 3400.0
239
+ },
240
+ {
241
+ "current": 3341.627,
242
+ "min": 800.0,
243
+ "max": 3400.0
244
+ },
245
+ {
246
+ "current": 2300.0,
247
+ "min": 800.0,
248
+ "max": 3400.0
249
+ },
250
+ {
251
+ "current": 2300.0,
252
+ "min": 800.0,
253
+ "max": 3400.0
254
+ },
255
+ {
256
+ "current": 2300.0,
257
+ "min": 800.0,
258
+ "max": 3400.0
259
+ },
260
+ {
261
+ "current": 2300.0,
262
+ "min": 800.0,
263
+ "max": 3400.0
264
+ },
265
+ {
266
+ "current": 2300.0,
267
+ "min": 800.0,
268
+ "max": 3400.0
269
+ },
270
+ {
271
+ "current": 2300.0,
272
+ "min": 800.0,
273
+ "max": 3400.0
274
+ },
275
+ {
276
+ "current": 2300.0,
277
+ "min": 800.0,
278
+ "max": 3400.0
279
+ },
280
+ {
281
+ "current": 2300.0,
282
+ "min": 800.0,
283
+ "max": 3400.0
284
+ },
285
+ {
286
+ "current": 2300.0,
287
+ "min": 800.0,
288
+ "max": 3400.0
289
+ },
290
+ {
291
+ "current": 2300.0,
292
+ "min": 800.0,
293
+ "max": 3400.0
294
+ },
295
+ {
296
+ "current": 2300.0,
297
+ "min": 800.0,
298
+ "max": 3400.0
299
+ },
300
+ {
301
+ "current": 2300.0,
302
+ "min": 800.0,
303
+ "max": 3400.0
304
+ },
305
+ {
306
+ "current": 2300.0,
307
+ "min": 800.0,
308
+ "max": 3400.0
309
+ },
310
+ {
311
+ "current": 2300.0,
312
+ "min": 800.0,
313
+ "max": 3400.0
314
+ },
315
+ {
316
+ "current": 2300.0,
317
+ "min": 800.0,
318
+ "max": 3400.0
319
+ },
320
+ {
321
+ "current": 2300.0,
322
+ "min": 800.0,
323
+ "max": 3400.0
324
+ },
325
+ {
326
+ "current": 2300.0,
327
+ "min": 800.0,
328
+ "max": 3400.0
329
+ },
330
+ {
331
+ "current": 2300.0,
332
+ "min": 800.0,
333
+ "max": 3400.0
334
+ },
335
+ {
336
+ "current": 2300.0,
337
+ "min": 800.0,
338
+ "max": 3400.0
339
+ },
340
+ {
341
+ "current": 2300.0,
342
+ "min": 800.0,
343
+ "max": 3400.0
344
+ },
345
+ {
346
+ "current": 2300.0,
347
+ "min": 800.0,
348
+ "max": 3400.0
349
+ },
350
+ {
351
+ "current": 2300.0,
352
+ "min": 800.0,
353
+ "max": 3400.0
354
+ },
355
+ {
356
+ "current": 2300.0,
357
+ "min": 800.0,
358
+ "max": 3400.0
359
+ },
360
+ {
361
+ "current": 2300.0,
362
+ "min": 800.0,
363
+ "max": 3400.0
364
+ },
365
+ {
366
+ "current": 2300.0,
367
+ "min": 800.0,
368
+ "max": 3400.0
369
+ },
370
+ {
371
+ "current": 2300.0,
372
+ "min": 800.0,
373
+ "max": 3400.0
374
+ },
375
+ {
376
+ "current": 2300.0,
377
+ "min": 800.0,
378
+ "max": 3400.0
379
+ },
380
+ {
381
+ "current": 2300.0,
382
+ "min": 800.0,
383
+ "max": 3400.0
384
+ },
385
+ {
386
+ "current": 2300.0,
387
+ "min": 800.0,
388
+ "max": 3400.0
389
+ },
390
+ {
391
+ "current": 2300.0,
392
+ "min": 800.0,
393
+ "max": 3400.0
394
+ },
395
+ {
396
+ "current": 2300.0,
397
+ "min": 800.0,
398
+ "max": 3400.0
399
+ },
400
+ {
401
+ "current": 2300.0,
402
+ "min": 800.0,
403
+ "max": 3400.0
404
+ },
405
+ {
406
+ "current": 2300.0,
407
+ "min": 800.0,
408
+ "max": 3400.0
409
+ },
410
+ {
411
+ "current": 2300.0,
412
+ "min": 800.0,
413
+ "max": 3400.0
414
+ },
415
+ {
416
+ "current": 2300.0,
417
+ "min": 800.0,
418
+ "max": 3400.0
419
+ },
420
+ {
421
+ "current": 2300.0,
422
+ "min": 800.0,
423
+ "max": 3400.0
424
+ },
425
+ {
426
+ "current": 2300.0,
427
+ "min": 800.0,
428
+ "max": 3400.0
429
+ },
430
+ {
431
+ "current": 2300.0,
432
+ "min": 800.0,
433
+ "max": 3400.0
434
+ },
435
+ {
436
+ "current": 2300.0,
437
+ "min": 800.0,
438
+ "max": 3400.0
439
+ },
440
+ {
441
+ "current": 3399.997,
442
+ "min": 800.0,
443
+ "max": 3400.0
444
+ },
445
+ {
446
+ "current": 2300.0,
447
+ "min": 800.0,
448
+ "max": 3400.0
449
+ },
450
+ {
451
+ "current": 2300.0,
452
+ "min": 800.0,
453
+ "max": 3400.0
454
+ },
455
+ {
456
+ "current": 2300.0,
457
+ "min": 800.0,
458
+ "max": 3400.0
459
+ },
460
+ {
461
+ "current": 2300.0,
462
+ "min": 800.0,
463
+ "max": 3400.0
464
+ },
465
+ {
466
+ "current": 2300.0,
467
+ "min": 800.0,
468
+ "max": 3400.0
469
+ },
470
+ {
471
+ "current": 2300.0,
472
+ "min": 800.0,
473
+ "max": 3400.0
474
+ },
475
+ {
476
+ "current": 2300.0,
477
+ "min": 800.0,
478
+ "max": 3400.0
479
+ },
480
+ {
481
+ "current": 2300.0,
482
+ "min": 800.0,
483
+ "max": 3400.0
484
+ },
485
+ {
486
+ "current": 2300.0,
487
+ "min": 800.0,
488
+ "max": 3400.0
489
+ },
490
+ {
491
+ "current": 2300.0,
492
+ "min": 800.0,
493
+ "max": 3400.0
494
+ },
495
+ {
496
+ "current": 2300.0,
497
+ "min": 800.0,
498
+ "max": 3400.0
499
+ },
500
+ {
501
+ "current": 2300.0,
502
+ "min": 800.0,
503
+ "max": 3400.0
504
+ },
505
+ {
506
+ "current": 2300.0,
507
+ "min": 800.0,
508
+ "max": 3400.0
509
+ },
510
+ {
511
+ "current": 2300.0,
512
+ "min": 800.0,
513
+ "max": 3400.0
514
+ },
515
+ {
516
+ "current": 2300.0,
517
+ "min": 800.0,
518
+ "max": 3400.0
519
+ },
520
+ {
521
+ "current": 2300.0,
522
+ "min": 800.0,
523
+ "max": 3400.0
524
+ },
525
+ {
526
+ "current": 2300.0,
527
+ "min": 800.0,
528
+ "max": 3400.0
529
+ },
530
+ {
531
+ "current": 2300.0,
532
+ "min": 800.0,
533
+ "max": 3400.0
534
+ },
535
+ {
536
+ "current": 2300.0,
537
+ "min": 800.0,
538
+ "max": 3400.0
539
+ },
540
+ {
541
+ "current": 2300.0,
542
+ "min": 800.0,
543
+ "max": 3400.0
544
+ },
545
+ {
546
+ "current": 2300.0,
547
+ "min": 800.0,
548
+ "max": 3400.0
549
+ },
550
+ {
551
+ "current": 2300.0,
552
+ "min": 800.0,
553
+ "max": 3400.0
554
+ },
555
+ {
556
+ "current": 2300.0,
557
+ "min": 800.0,
558
+ "max": 3400.0
559
+ },
560
+ {
561
+ "current": 2300.0,
562
+ "min": 800.0,
563
+ "max": 3400.0
564
+ },
565
+ {
566
+ "current": 2300.0,
567
+ "min": 800.0,
568
+ "max": 3400.0
569
+ },
570
+ {
571
+ "current": 2300.0,
572
+ "min": 800.0,
573
+ "max": 3400.0
574
+ },
575
+ {
576
+ "current": 2300.0,
577
+ "min": 800.0,
578
+ "max": 3400.0
579
+ },
580
+ {
581
+ "current": 2300.0,
582
+ "min": 800.0,
583
+ "max": 3400.0
584
+ },
585
+ {
586
+ "current": 2300.0,
587
+ "min": 800.0,
588
+ "max": 3400.0
589
+ },
590
+ {
591
+ "current": 2300.0,
592
+ "min": 800.0,
593
+ "max": 3400.0
594
+ },
595
+ {
596
+ "current": 2300.0,
597
+ "min": 800.0,
598
+ "max": 3400.0
599
+ },
600
+ {
601
+ "current": 2300.0,
602
+ "min": 800.0,
603
+ "max": 3400.0
604
+ },
605
+ {
606
+ "current": 2300.0,
607
+ "min": 800.0,
608
+ "max": 3400.0
609
+ },
610
+ {
611
+ "current": 2300.0,
612
+ "min": 800.0,
613
+ "max": 3400.0
614
+ },
615
+ {
616
+ "current": 2300.0,
617
+ "min": 800.0,
618
+ "max": 3400.0
619
+ },
620
+ {
621
+ "current": 2300.0,
622
+ "min": 800.0,
623
+ "max": 3400.0
624
+ },
625
+ {
626
+ "current": 2300.0,
627
+ "min": 800.0,
628
+ "max": 3400.0
629
+ },
630
+ {
631
+ "current": 2300.0,
632
+ "min": 800.0,
633
+ "max": 3400.0
634
+ },
635
+ {
636
+ "current": 2300.0,
637
+ "min": 800.0,
638
+ "max": 3400.0
639
+ },
640
+ {
641
+ "current": 2300.0,
642
+ "min": 800.0,
643
+ "max": 3400.0
644
+ },
645
+ {
646
+ "current": 2300.0,
647
+ "min": 800.0,
648
+ "max": 3400.0
649
+ },
650
+ {
651
+ "current": 2300.0,
652
+ "min": 800.0,
653
+ "max": 3400.0
654
+ },
655
+ {
656
+ "current": 2300.0,
657
+ "min": 800.0,
658
+ "max": 3400.0
659
+ },
660
+ {
661
+ "current": 2300.0,
662
+ "min": 800.0,
663
+ "max": 3400.0
664
+ },
665
+ {
666
+ "current": 2300.0,
667
+ "min": 800.0,
668
+ "max": 3400.0
669
+ },
670
+ {
671
+ "current": 2300.0,
672
+ "min": 800.0,
673
+ "max": 3400.0
674
+ },
675
+ {
676
+ "current": 2300.0,
677
+ "min": 800.0,
678
+ "max": 3400.0
679
+ },
680
+ {
681
+ "current": 2300.0,
682
+ "min": 800.0,
683
+ "max": 3400.0
684
+ },
685
+ {
686
+ "current": 2300.0,
687
+ "min": 800.0,
688
+ "max": 3400.0
689
+ },
690
+ {
691
+ "current": 2300.0,
692
+ "min": 800.0,
693
+ "max": 3400.0
694
+ },
695
+ {
696
+ "current": 2300.0,
697
+ "min": 800.0,
698
+ "max": 3400.0
699
+ },
700
+ {
701
+ "current": 2300.0,
702
+ "min": 800.0,
703
+ "max": 3400.0
704
+ },
705
+ {
706
+ "current": 2300.0,
707
+ "min": 800.0,
708
+ "max": 3400.0
709
+ },
710
+ {
711
+ "current": 2300.0,
712
+ "min": 800.0,
713
+ "max": 3400.0
714
+ },
715
+ {
716
+ "current": 2300.0,
717
+ "min": 800.0,
718
+ "max": 3400.0
719
+ },
720
+ {
721
+ "current": 2300.0,
722
+ "min": 800.0,
723
+ "max": 3400.0
724
+ },
725
+ {
726
+ "current": 2300.0,
727
+ "min": 800.0,
728
+ "max": 3400.0
729
+ },
730
+ {
731
+ "current": 2300.0,
732
+ "min": 800.0,
733
+ "max": 3400.0
734
+ },
735
+ {
736
+ "current": 2300.0,
737
+ "min": 800.0,
738
+ "max": 3400.0
739
+ },
740
+ {
741
+ "current": 2300.0,
742
+ "min": 800.0,
743
+ "max": 3400.0
744
+ },
745
+ {
746
+ "current": 2300.0,
747
+ "min": 800.0,
748
+ "max": 3400.0
749
+ },
750
+ {
751
+ "current": 2300.0,
752
+ "min": 800.0,
753
+ "max": 3400.0
754
+ },
755
+ {
756
+ "current": 2300.0,
757
+ "min": 800.0,
758
+ "max": 3400.0
759
+ },
760
+ {
761
+ "current": 3336.69,
762
+ "min": 800.0,
763
+ "max": 3400.0
764
+ },
765
+ {
766
+ "current": 2300.0,
767
+ "min": 800.0,
768
+ "max": 3400.0
769
+ },
770
+ {
771
+ "current": 2300.0,
772
+ "min": 800.0,
773
+ "max": 3400.0
774
+ },
775
+ {
776
+ "current": 2300.0,
777
+ "min": 800.0,
778
+ "max": 3400.0
779
+ },
780
+ {
781
+ "current": 2300.0,
782
+ "min": 800.0,
783
+ "max": 3400.0
784
+ },
785
+ {
786
+ "current": 2300.0,
787
+ "min": 800.0,
788
+ "max": 3400.0
789
+ },
790
+ {
791
+ "current": 3335.088,
792
+ "min": 800.0,
793
+ "max": 3400.0
794
+ },
795
+ {
796
+ "current": 2300.0,
797
+ "min": 800.0,
798
+ "max": 3400.0
799
+ },
800
+ {
801
+ "current": 2300.0,
802
+ "min": 800.0,
803
+ "max": 3400.0
804
+ },
805
+ {
806
+ "current": 2300.0,
807
+ "min": 800.0,
808
+ "max": 3400.0
809
+ },
810
+ {
811
+ "current": 2300.0,
812
+ "min": 800.0,
813
+ "max": 3400.0
814
+ },
815
+ {
816
+ "current": 2300.0,
817
+ "min": 800.0,
818
+ "max": 3400.0
819
+ },
820
+ {
821
+ "current": 2300.0,
822
+ "min": 800.0,
823
+ "max": 3400.0
824
+ },
825
+ {
826
+ "current": 2300.0,
827
+ "min": 800.0,
828
+ "max": 3400.0
829
+ },
830
+ {
831
+ "current": 2300.0,
832
+ "min": 800.0,
833
+ "max": 3400.0
834
+ },
835
+ {
836
+ "current": 2300.0,
837
+ "min": 800.0,
838
+ "max": 3400.0
839
+ }
840
+ ],
841
+ "disk": {
842
+ "/": {
843
+ "total": 877.6341285705566,
844
+ "used": 212.20449829101562
845
+ }
846
+ },
847
+ "memory": {
848
+ "total": 1007.43798828125
849
+ }
850
+ }
lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/wandb-summary.json ADDED
@@ -0,0 +1 @@
+ {"_wandb": {"runtime": 11}}
lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/logs/debug-internal.log ADDED
@@ -0,0 +1,183 @@
1
+ 2024-05-23 04:51:26,452 INFO StreamThr :811 [internal.py:wandb_internal():85] W&B internal server running at pid: 811, started at: 2024-05-23 04:51:26.450632
2
+ 2024-05-23 04:51:26,457 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: status
3
+ 2024-05-23 04:51:26,458 INFO WriterThread:811 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/run-zatipfuv.wandb
4
+ 2024-05-23 04:51:26,459 DEBUG SenderThread:811 [sender.py:send():378] send: header
5
+ 2024-05-23 04:51:26,462 DEBUG SenderThread:811 [sender.py:send():378] send: run
6
+ 2024-05-23 04:51:26,718 INFO SenderThread:811 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files
7
+ 2024-05-23 04:51:26,718 INFO SenderThread:811 [sender.py:_start_run_threads():1123] run started: zatipfuv with start time 1716439886.450711
8
+ 2024-05-23 04:51:26,725 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: check_version
9
+ 2024-05-23 04:51:26,725 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: check_version
10
+ 2024-05-23 04:51:26,844 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: run_start
11
+ 2024-05-23 04:51:26,847 DEBUG HandlerThread:811 [system_info.py:__init__():26] System info init
12
+ 2024-05-23 04:51:26,847 DEBUG HandlerThread:811 [system_info.py:__init__():41] System info init done
13
+ 2024-05-23 04:51:26,847 INFO HandlerThread:811 [system_monitor.py:start():194] Starting system monitor
14
+ 2024-05-23 04:51:26,847 INFO SystemMonitor:811 [system_monitor.py:_start():158] Starting system asset monitoring threads
15
+ 2024-05-23 04:51:26,847 INFO HandlerThread:811 [system_monitor.py:probe():214] Collecting system info
16
+ 2024-05-23 04:51:26,854 INFO SystemMonitor:811 [interfaces.py:start():188] Started cpu monitoring
17
+ 2024-05-23 04:51:26,854 INFO SystemMonitor:811 [interfaces.py:start():188] Started disk monitoring
18
+ 2024-05-23 04:51:26,855 INFO SystemMonitor:811 [interfaces.py:start():188] Started memory monitoring
19
+ 2024-05-23 04:51:26,855 INFO SystemMonitor:811 [interfaces.py:start():188] Started network monitoring
20
+ 2024-05-23 04:51:26,939 DEBUG HandlerThread:811 [system_info.py:probe():150] Probing system
21
+ 2024-05-23 04:51:26,942 DEBUG HandlerThread:811 [system_info.py:_probe_git():135] Probing git
22
+ 2024-05-23 04:51:26,952 ERROR HandlerThread:811 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
23
+ cmdline: git rev-parse --show-toplevel
24
+ stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
25
+ To add an exception for this directory, call:
26
+
27
+ git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
28
+ 2024-05-23 04:51:26,952 DEBUG HandlerThread:811 [system_info.py:_probe_git():143] Probing git done
29
+ 2024-05-23 04:51:26,952 DEBUG HandlerThread:811 [system_info.py:probe():198] Probing system done
30
+ 2024-05-23 04:51:26,952 DEBUG HandlerThread:811 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T04:51:26.939641', 'startedAt': '2024-05-23T04:51:26.427629', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step100', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-debug-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2334.34245625, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3341.627, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3336.69, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3335.088, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 212.20449829101562}}, 'memory': {'total': 1007.43798828125}}
31
+ 2024-05-23 04:51:26,953 INFO HandlerThread:811 [system_monitor.py:probe():224] Finished collecting system info
32
+ 2024-05-23 04:51:26,953 INFO HandlerThread:811 [system_monitor.py:probe():227] Publishing system info
33
+ 2024-05-23 04:51:26,956 INFO HandlerThread:811 [system_monitor.py:probe():229] Finished publishing system info
34
+ 2024-05-23 04:51:26,961 DEBUG SenderThread:811 [sender.py:send():378] send: files
35
+ 2024-05-23 04:51:26,961 INFO SenderThread:811 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
36
+ 2024-05-23 04:51:27,141 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: python_packages
37
+ 2024-05-23 04:51:27,141 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: python_packages
38
+ 2024-05-23 04:51:27,143 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: stop_status
39
+ 2024-05-23 04:51:27,143 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: stop_status
40
+ 2024-05-23 04:51:27,249 DEBUG SenderThread:811 [sender.py:send():378] send: telemetry
41
+ 2024-05-23 04:51:27,597 INFO wandb-upload_0:811 [upload_job.py:push():130] Uploaded file /tmp/tmp_076n2_ewandb/d4n2m7i8-wandb-metadata.json
42
+ 2024-05-23 04:51:27,720 INFO Thread-12 :811 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/wandb-metadata.json
43
+ 2024-05-23 04:51:27,720 INFO Thread-12 :811 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/requirements.txt
44
+ 2024-05-23 04:51:27,720 INFO Thread-12 :811 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/output.log
45
+ 2024-05-23 04:51:29,720 INFO Thread-12 :811 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/output.log
46
+ 2024-05-23 04:51:32,250 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: status_report
47
+ 2024-05-23 04:51:37,726 INFO Thread-12 :811 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/output.log
48
+ 2024-05-23 04:51:38,126 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: status_report
49
+ 2024-05-23 04:51:38,457 DEBUG SenderThread:811 [sender.py:send():378] send: exit
50
+ 2024-05-23 04:51:38,457 INFO SenderThread:811 [sender.py:send_exit():585] handling exit code: 1
51
+ 2024-05-23 04:51:38,457 INFO SenderThread:811 [sender.py:send_exit():587] handling runtime: 11
52
+ 2024-05-23 04:51:38,459 INFO SenderThread:811 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
53
+ 2024-05-23 04:51:38,459 INFO SenderThread:811 [sender.py:send_exit():593] send defer
54
+ 2024-05-23 04:51:38,459 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer
55
+ 2024-05-23 04:51:38,459 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 0
56
+ 2024-05-23 04:51:38,459 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer
57
+ 2024-05-23 04:51:38,459 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 0
58
+ 2024-05-23 04:51:38,459 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 1
59
+ 2024-05-23 04:51:38,459 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer
60
+ 2024-05-23 04:51:38,459 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 1
61
+ 2024-05-23 04:51:38,459 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer
62
+ 2024-05-23 04:51:38,459 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 1
63
+ 2024-05-23 04:51:38,460 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 2
64
+ 2024-05-23 04:51:38,460 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer
65
+ 2024-05-23 04:51:38,460 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 2
66
+ 2024-05-23 04:51:38,460 INFO HandlerThread:811 [system_monitor.py:finish():203] Stopping system monitor
67
+ 2024-05-23 04:51:38,460 DEBUG SystemMonitor:811 [system_monitor.py:_start():172] Starting system metrics aggregation loop
68
+ 2024-05-23 04:51:38,460 DEBUG SystemMonitor:811 [system_monitor.py:_start():179] Finished system metrics aggregation loop
69
+ 2024-05-23 04:51:38,460 DEBUG SystemMonitor:811 [system_monitor.py:_start():183] Publishing last batch of metrics
70
+ 2024-05-23 04:51:38,460 INFO HandlerThread:811 [interfaces.py:finish():200] Joined cpu monitor
71
+ 2024-05-23 04:51:38,462 INFO HandlerThread:811 [interfaces.py:finish():200] Joined disk monitor
72
+ 2024-05-23 04:51:38,462 INFO HandlerThread:811 [interfaces.py:finish():200] Joined memory monitor
73
+ 2024-05-23 04:51:38,462 INFO HandlerThread:811 [interfaces.py:finish():200] Joined network monitor
74
+ 2024-05-23 04:51:38,462 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer
75
+ 2024-05-23 04:51:38,463 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 2
76
+ 2024-05-23 04:51:38,463 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 3
77
+ 2024-05-23 04:51:38,463 DEBUG SenderThread:811 [sender.py:send():378] send: stats
78
+ 2024-05-23 04:51:38,463 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer
79
+ 2024-05-23 04:51:38,464 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 3
80
+ 2024-05-23 04:51:38,464 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer
81
+ 2024-05-23 04:51:38,464 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 3
82
+ 2024-05-23 04:51:38,464 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 4
83
+ 2024-05-23 04:51:38,464 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer
84
+ 2024-05-23 04:51:38,464 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 4
85
+ 2024-05-23 04:51:38,464 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer
86
+ 2024-05-23 04:51:38,464 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 4
87
+ 2024-05-23 04:51:38,464 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 5
88
+ 2024-05-23 04:51:38,464 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer
89
+ 2024-05-23 04:51:38,464 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 5
90
+ 2024-05-23 04:51:38,465 DEBUG SenderThread:811 [sender.py:send():378] send: summary
91
+ 2024-05-23 04:51:38,465 INFO SenderThread:811 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
92
+ 2024-05-23 04:51:38,466 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer
93
+ 2024-05-23 04:51:38,466 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 5
94
+ 2024-05-23 04:51:38,466 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 6
95
+ 2024-05-23 04:51:38,466 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer
96
+ 2024-05-23 04:51:38,466 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 6
97
+ 2024-05-23 04:51:38,466 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer
98
+ 2024-05-23 04:51:38,466 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 6
99
+ 2024-05-23 04:51:38,470 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: status_report
100
+ 2024-05-23 04:51:38,560 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 7
101
+ 2024-05-23 04:51:38,560 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer
102
+ 2024-05-23 04:51:38,560 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 7
103
+ 2024-05-23 04:51:38,560 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer
104
+ 2024-05-23 04:51:38,560 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 7
105
+ 2024-05-23 04:51:38,728 INFO Thread-12 :811 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/config.yaml
106
+ 2024-05-23 04:51:38,728 INFO Thread-12 :811 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/wandb-summary.json
107
+ 2024-05-23 04:51:39,274 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 8
108
+ 2024-05-23 04:51:39,275 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer
109
+ 2024-05-23 04:51:39,275 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 8
110
+ 2024-05-23 04:51:39,275 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer
111
+ 2024-05-23 04:51:39,275 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 8
112
+ 2024-05-23 04:51:39,275 INFO SenderThread:811 [job_builder.py:build():432] Attempting to build job artifact
113
+ 2024-05-23 04:51:39,275 INFO SenderThread:811 [job_builder.py:_get_source_type():576] no source found
114
+ 2024-05-23 04:51:39,276 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 9
115
+ 2024-05-23 04:51:39,276 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer
116
+ 2024-05-23 04:51:39,276 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 9
117
+ 2024-05-23 04:51:39,276 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer
118
+ 2024-05-23 04:51:39,276 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 9
119
+ 2024-05-23 04:51:39,276 INFO SenderThread:811 [dir_watcher.py:finish():358] shutting down directory watcher
120
+ 2024-05-23 04:51:39,457 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: poll_exit
121
+ 2024-05-23 04:51:39,729 INFO SenderThread:811 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/output.log
122
+ 2024-05-23 04:51:39,729 INFO SenderThread:811 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files
123
+ 2024-05-23 04:51:39,729 INFO SenderThread:811 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/output.log output.log
124
+ 2024-05-23 04:51:39,729 INFO SenderThread:811 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/requirements.txt requirements.txt
125
+ 2024-05-23 04:51:39,732 INFO SenderThread:811 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/wandb-summary.json wandb-summary.json
126
+ 2024-05-23 04:51:39,732 INFO SenderThread:811 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/wandb-metadata.json wandb-metadata.json
127
+ 2024-05-23 04:51:39,732 INFO SenderThread:811 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/config.yaml config.yaml
128
+ 2024-05-23 04:51:39,732 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 10
129
+ 2024-05-23 04:51:39,732 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: poll_exit
130
+ 2024-05-23 04:51:39,732 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer
131
+ 2024-05-23 04:51:39,734 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 10
132
+ 2024-05-23 04:51:39,735 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer
133
+ 2024-05-23 04:51:39,737 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 10
134
+ 2024-05-23 04:51:39,737 INFO SenderThread:811 [file_pusher.py:finish():169] shutting down file pusher
135
+ 2024-05-23 04:51:39,965 INFO wandb-upload_0:811 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/output.log
136
+ 2024-05-23 04:51:40,294 INFO wandb-upload_2:811 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/wandb-summary.json
137
+ 2024-05-23 04:51:40,329 INFO wandb-upload_1:811 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/requirements.txt
138
+ 2024-05-23 04:51:40,330 INFO wandb-upload_3:811 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/files/config.yaml
139
+ 2024-05-23 04:51:40,458 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: poll_exit
140
+ 2024-05-23 04:51:40,458 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: poll_exit
141
+ 2024-05-23 04:51:40,530 INFO Thread-11 (_thread_body):811 [sender.py:transition_state():613] send defer: 11
142
+ 2024-05-23 04:51:40,530 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer
143
+ 2024-05-23 04:51:40,530 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 11
144
+ 2024-05-23 04:51:40,531 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer
145
+ 2024-05-23 04:51:40,531 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 11
146
+ 2024-05-23 04:51:40,531 INFO SenderThread:811 [file_pusher.py:join():175] waiting for file pusher
147
+ 2024-05-23 04:51:40,531 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 12
148
+ 2024-05-23 04:51:40,531 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer
149
+ 2024-05-23 04:51:40,531 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 12
150
+ 2024-05-23 04:51:40,531 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer
151
+ 2024-05-23 04:51:40,531 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 12
152
+ 2024-05-23 04:51:40,531 INFO SenderThread:811 [file_stream.py:finish():601] file stream finish called
153
+ 2024-05-23 04:51:40,591 INFO SenderThread:811 [file_stream.py:finish():605] file stream finish is done
154
+ 2024-05-23 04:51:40,591 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 13
155
+ 2024-05-23 04:51:40,591 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer
156
+ 2024-05-23 04:51:40,591 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 13
157
+ 2024-05-23 04:51:40,591 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer
158
+ 2024-05-23 04:51:40,591 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 13
159
+ 2024-05-23 04:51:40,591 INFO SenderThread:811 [sender.py:transition_state():613] send defer: 14
160
+ 2024-05-23 04:51:40,591 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: defer
161
+ 2024-05-23 04:51:40,591 INFO HandlerThread:811 [handler.py:handle_request_defer():184] handle defer: 14
162
+ 2024-05-23 04:51:40,591 DEBUG SenderThread:811 [sender.py:send():378] send: final
163
+ 2024-05-23 04:51:40,591 DEBUG SenderThread:811 [sender.py:send():378] send: footer
164
+ 2024-05-23 04:51:40,592 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: defer
165
+ 2024-05-23 04:51:40,592 INFO SenderThread:811 [sender.py:send_request_defer():609] handle sender defer: 14
166
+ 2024-05-23 04:51:40,592 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: poll_exit
167
+ 2024-05-23 04:51:40,592 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: poll_exit
168
+ 2024-05-23 04:51:40,593 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: poll_exit
169
+ 2024-05-23 04:51:40,593 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: server_info
170
+ 2024-05-23 04:51:40,593 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: get_summary
171
+ 2024-05-23 04:51:40,593 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: sampled_history
172
+ 2024-05-23 04:51:40,593 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: internal_messages
173
+ 2024-05-23 04:51:40,593 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: poll_exit
174
+ 2024-05-23 04:51:40,593 DEBUG SenderThread:811 [sender.py:send_request():405] send_request: server_info
175
+ 2024-05-23 04:51:40,654 INFO MainThread:811 [wandb_run.py:_footer_history_summary_info():3994] rendering history
176
+ 2024-05-23 04:51:40,654 INFO MainThread:811 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
177
+ 2024-05-23 04:51:40,654 INFO MainThread:811 [wandb_run.py:_footer_sync_info():3953] logging synced files
178
+ 2024-05-23 04:51:40,654 DEBUG HandlerThread:811 [handler.py:handle_request():158] handle_request: shutdown
179
+ 2024-05-23 04:51:40,655 INFO HandlerThread:811 [handler.py:finish():882] shutting down handler
180
+ 2024-05-23 04:51:41,593 INFO WriterThread:811 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/run-zatipfuv.wandb
181
+ 2024-05-23 04:51:41,654 INFO SenderThread:811 [sender.py:finish():1545] shutting down sender
182
+ 2024-05-23 04:51:41,654 INFO SenderThread:811 [file_pusher.py:finish():169] shutting down file pusher
183
+ 2024-05-23 04:51:41,654 INFO SenderThread:811 [file_pusher.py:join():175] waiting for file pusher
lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/logs/debug.log ADDED
@@ -0,0 +1,29 @@
1
+ 2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0
2
+ 2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_setup.py:_flush():76] Configure stats pid to 655
3
+ 2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
4
+ 2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings
5
+ 2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
6
+ 2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
7
+ 2024-05-23 04:51:26,445 WARNING MainThread:655 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
8
+ 2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
9
+ 2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_setup.py:_flush():76] Applying login settings: {}
10
+ 2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/logs/debug.log
11
+ 2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/logs/debug-internal.log
12
+ 2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_init.py:init():560] calling init triggers
13
+ 2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
14
+ config: {}
15
+ 2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_init.py:init():610] starting backend
16
+ 2024-05-23 04:51:26,445 INFO MainThread:655 [wandb_init.py:init():614] setting up manager
17
+ 2024-05-23 04:51:26,449 INFO MainThread:655 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
18
+ 2024-05-23 04:51:26,450 INFO MainThread:655 [wandb_init.py:init():622] backend started and connected
19
+ 2024-05-23 04:51:26,453 INFO MainThread:655 [wandb_init.py:init():711] updated telemetry
20
+ 2024-05-23 04:51:26,461 INFO MainThread:655 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
21
+ 2024-05-23 04:51:26,725 INFO MainThread:655 [wandb_run.py:_on_init():2396] communicating current version
22
+ 2024-05-23 04:51:26,838 INFO MainThread:655 [wandb_run.py:_on_init():2405] got version response
23
+ 2024-05-23 04:51:26,838 INFO MainThread:655 [wandb_init.py:init():795] starting run threads in backend
24
+ 2024-05-23 04:51:27,142 INFO MainThread:655 [wandb_run.py:_console_start():2374] atexit reg
25
+ 2024-05-23 04:51:27,142 INFO MainThread:655 [wandb_run.py:_redirect():2229] redirect: wrap_raw
26
+ 2024-05-23 04:51:27,142 INFO MainThread:655 [wandb_run.py:_redirect():2294] Wrapping output streams.
27
+ 2024-05-23 04:51:27,142 INFO MainThread:655 [wandb_run.py:_redirect():2319] Redirects installed.
28
+ 2024-05-23 04:51:27,144 INFO MainThread:655 [wandb_init.py:init():838] run started, returning control to user process
29
+ 2024-05-23 04:51:41,655 WARNING MsgRouterThr:655 [router.py:message_loop():77] message_loop has been closed
lm-evaluation-harness/wandb/run-20240523_045126-zatipfuv/run-zatipfuv.wandb ADDED
Binary file (11.2 kB)
 
lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/logs/debug-internal.log ADDED
@@ -0,0 +1,182 @@
1
+ 2024-05-23 04:55:44,631 INFO StreamThr :1763 [internal.py:wandb_internal():85] W&B internal server running at pid: 1763, started at: 2024-05-23 04:55:44.626261
2
+ 2024-05-23 04:55:44,633 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: status
3
+ 2024-05-23 04:55:44,633 INFO WriterThread:1763 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/run-cegnkp0e.wandb
4
+ 2024-05-23 04:55:44,635 DEBUG SenderThread:1763 [sender.py:send():378] send: header
5
+ 2024-05-23 04:55:44,640 DEBUG SenderThread:1763 [sender.py:send():378] send: run
6
+ 2024-05-23 04:55:44,913 INFO SenderThread:1763 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files
7
+ 2024-05-23 04:55:44,913 INFO SenderThread:1763 [sender.py:_start_run_threads():1123] run started: cegnkp0e with start time 1716440144.626722
8
+ 2024-05-23 04:55:44,916 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: check_version
9
+ 2024-05-23 04:55:44,917 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: check_version
10
+ 2024-05-23 04:55:45,037 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: run_start
11
+ 2024-05-23 04:55:45,039 DEBUG HandlerThread:1763 [system_info.py:__init__():26] System info init
12
+ 2024-05-23 04:55:45,039 DEBUG HandlerThread:1763 [system_info.py:__init__():41] System info init done
13
+ 2024-05-23 04:55:45,039 INFO HandlerThread:1763 [system_monitor.py:start():194] Starting system monitor
14
+ 2024-05-23 04:55:45,039 INFO SystemMonitor:1763 [system_monitor.py:_start():158] Starting system asset monitoring threads
15
+ 2024-05-23 04:55:45,039 INFO HandlerThread:1763 [system_monitor.py:probe():214] Collecting system info
16
+ 2024-05-23 04:55:45,046 INFO SystemMonitor:1763 [interfaces.py:start():188] Started cpu monitoring
17
+ 2024-05-23 04:55:45,046 INFO SystemMonitor:1763 [interfaces.py:start():188] Started disk monitoring
18
+ 2024-05-23 04:55:45,048 INFO SystemMonitor:1763 [interfaces.py:start():188] Started memory monitoring
19
+ 2024-05-23 04:55:45,049 INFO SystemMonitor:1763 [interfaces.py:start():188] Started network monitoring
20
+ 2024-05-23 04:55:45,111 DEBUG HandlerThread:1763 [system_info.py:probe():150] Probing system
21
+ 2024-05-23 04:55:45,115 DEBUG HandlerThread:1763 [system_info.py:_probe_git():135] Probing git
22
+ 2024-05-23 04:55:45,125 ERROR HandlerThread:1763 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
23
+ cmdline: git rev-parse --show-toplevel
24
+ stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
25
+ To add an exception for this directory, call:
26
+
27
+ git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
28
+ 2024-05-23 04:55:45,125 DEBUG HandlerThread:1763 [system_info.py:_probe_git():143] Probing git done
29
+ 2024-05-23 04:55:45,125 DEBUG HandlerThread:1763 [system_info.py:probe():198] Probing system done
30
+ 2024-05-23 04:55:45,125 DEBUG HandlerThread:1763 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-23T04:55:45.111786', 'startedAt': '2024-05-23T04:55:44.608684', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/checkpoint/llamav2-3b//hf_ckpt//global_step12000', '--tasks', 'hellaswag,arc_easy,openbookqa,winogrande,sst2,mrpc', '--batch_size', 'auto', '--wandb_args', 'project=bharatgpt,group=trial_expt_2'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-debug-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.4999875000003, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3392.442, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, 
{'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3368.04, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3117.704, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 212.19972229003906}}, 'memory': {'total': 1007.43798828125}}
31
+ 2024-05-23 04:55:45,125 INFO HandlerThread:1763 [system_monitor.py:probe():224] Finished collecting system info
32
+ 2024-05-23 04:55:45,125 INFO HandlerThread:1763 [system_monitor.py:probe():227] Publishing system info
33
+ 2024-05-23 04:55:45,128 INFO HandlerThread:1763 [system_monitor.py:probe():229] Finished publishing system info
34
+ 2024-05-23 04:55:45,133 DEBUG SenderThread:1763 [sender.py:send():378] send: files
35
+ 2024-05-23 04:55:45,133 INFO SenderThread:1763 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
36
+ 2024-05-23 04:55:45,305 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: python_packages
37
+ 2024-05-23 04:55:45,305 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: python_packages
38
+ 2024-05-23 04:55:45,306 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: stop_status
39
+ 2024-05-23 04:55:45,307 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: stop_status
40
+ 2024-05-23 04:55:45,432 DEBUG SenderThread:1763 [sender.py:send():378] send: telemetry
41
+ 2024-05-23 04:55:45,708 INFO wandb-upload_0:1763 [upload_job.py:push():130] Uploaded file /tmp/tmpmuiczo1owandb/lvic4v80-wandb-metadata.json
42
+ 2024-05-23 04:55:45,915 INFO Thread-12 :1763 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/requirements.txt
43
+ 2024-05-23 04:55:45,916 INFO Thread-12 :1763 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/output.log
44
+ 2024-05-23 04:55:45,916 INFO Thread-12 :1763 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/wandb-metadata.json
45
+ 2024-05-23 04:55:47,915 INFO Thread-12 :1763 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/output.log
46
+ 2024-05-23 04:55:50,438 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: status_report
47
+ 2024-05-23 04:55:55,768 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: status_report
48
+ 2024-05-23 04:55:55,922 INFO Thread-12 :1763 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/output.log
49
+ 2024-05-23 04:55:56,082 DEBUG SenderThread:1763 [sender.py:send():378] send: exit
50
+ 2024-05-23 04:55:56,082 INFO SenderThread:1763 [sender.py:send_exit():585] handling exit code: 1
51
+ 2024-05-23 04:55:56,082 INFO SenderThread:1763 [sender.py:send_exit():587] handling runtime: 11
52
+ 2024-05-23 04:55:56,084 INFO SenderThread:1763 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
53
+ 2024-05-23 04:55:56,084 INFO SenderThread:1763 [sender.py:send_exit():593] send defer
54
+ 2024-05-23 04:55:56,084 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer
55
+ 2024-05-23 04:55:56,084 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 0
56
+ 2024-05-23 04:55:56,084 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer
57
+ 2024-05-23 04:55:56,084 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 0
58
+ 2024-05-23 04:55:56,084 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 1
59
+ 2024-05-23 04:55:56,084 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer
60
+ 2024-05-23 04:55:56,084 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 1
61
+ 2024-05-23 04:55:56,084 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer
62
+ 2024-05-23 04:55:56,084 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 1
63
+ 2024-05-23 04:55:56,084 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 2
64
+ 2024-05-23 04:55:56,085 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer
65
+ 2024-05-23 04:55:56,085 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 2
66
+ 2024-05-23 04:55:56,085 INFO HandlerThread:1763 [system_monitor.py:finish():203] Stopping system monitor
67
+ 2024-05-23 04:55:56,085 DEBUG SystemMonitor:1763 [system_monitor.py:_start():172] Starting system metrics aggregation loop
68
+ 2024-05-23 04:55:56,085 DEBUG SystemMonitor:1763 [system_monitor.py:_start():179] Finished system metrics aggregation loop
69
+ 2024-05-23 04:55:56,085 DEBUG SystemMonitor:1763 [system_monitor.py:_start():183] Publishing last batch of metrics
70
+ 2024-05-23 04:55:56,086 INFO HandlerThread:1763 [interfaces.py:finish():200] Joined cpu monitor
71
+ 2024-05-23 04:55:56,086 INFO HandlerThread:1763 [interfaces.py:finish():200] Joined disk monitor
72
+ 2024-05-23 04:55:56,086 INFO HandlerThread:1763 [interfaces.py:finish():200] Joined memory monitor
73
+ 2024-05-23 04:55:56,086 INFO HandlerThread:1763 [interfaces.py:finish():200] Joined network monitor
74
+ 2024-05-23 04:55:56,086 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer
75
+ 2024-05-23 04:55:56,086 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 2
76
+ 2024-05-23 04:55:56,086 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 3
77
+ 2024-05-23 04:55:56,086 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer
78
+ 2024-05-23 04:55:56,086 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 3
79
+ 2024-05-23 04:55:56,086 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer
80
+ 2024-05-23 04:55:56,086 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 3
81
+ 2024-05-23 04:55:56,086 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 4
82
+ 2024-05-23 04:55:56,086 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer
83
+ 2024-05-23 04:55:56,087 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 4
84
+ 2024-05-23 04:55:56,087 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer
85
+ 2024-05-23 04:55:56,087 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 4
86
+ 2024-05-23 04:55:56,087 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 5
87
+ 2024-05-23 04:55:56,087 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer
88
+ 2024-05-23 04:55:56,087 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 5
89
+ 2024-05-23 04:55:56,087 DEBUG SenderThread:1763 [sender.py:send():378] send: summary
90
+ 2024-05-23 04:55:56,088 INFO SenderThread:1763 [sender.py:_save_file():1389] saving file wandb-summary.json with policy end
91
+ 2024-05-23 04:55:56,088 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer
92
+ 2024-05-23 04:55:56,088 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 5
93
+ 2024-05-23 04:55:56,088 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 6
94
+ 2024-05-23 04:55:56,088 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer
95
+ 2024-05-23 04:55:56,088 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 6
96
+ 2024-05-23 04:55:56,088 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer
97
+ 2024-05-23 04:55:56,088 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 6
98
+ 2024-05-23 04:55:56,093 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: status_report
99
+ 2024-05-23 04:55:56,177 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 7
100
+ 2024-05-23 04:55:56,177 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer
101
+ 2024-05-23 04:55:56,177 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 7
102
+ 2024-05-23 04:55:56,177 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer
103
+ 2024-05-23 04:55:56,177 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 7
104
+ 2024-05-23 04:55:56,923 INFO Thread-12 :1763 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/config.yaml
105
+ 2024-05-23 04:55:56,924 INFO Thread-12 :1763 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/wandb-summary.json
106
+ 2024-05-23 04:55:57,082 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: poll_exit
107
+ 2024-05-23 04:55:57,460 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 8
108
+ 2024-05-23 04:55:57,460 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: poll_exit
109
+ 2024-05-23 04:55:57,460 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer
110
+ 2024-05-23 04:55:57,460 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 8
111
+ 2024-05-23 04:55:57,460 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer
112
+ 2024-05-23 04:55:57,460 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 8
113
+ 2024-05-23 04:55:57,460 INFO SenderThread:1763 [job_builder.py:build():432] Attempting to build job artifact
114
+ 2024-05-23 04:55:57,461 INFO SenderThread:1763 [job_builder.py:_get_source_type():576] no source found
115
+ 2024-05-23 04:55:57,461 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 9
116
+ 2024-05-23 04:55:57,461 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer
117
+ 2024-05-23 04:55:57,461 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 9
118
+ 2024-05-23 04:55:57,461 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer
119
+ 2024-05-23 04:55:57,461 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 9
120
+ 2024-05-23 04:55:57,461 INFO SenderThread:1763 [dir_watcher.py:finish():358] shutting down directory watcher
121
+ 2024-05-23 04:55:57,925 INFO SenderThread:1763 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/output.log
122
+ 2024-05-23 04:55:57,925 INFO SenderThread:1763 [dir_watcher.py:finish():388] scan: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files
123
+ 2024-05-23 04:55:57,925 INFO SenderThread:1763 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/output.log output.log
124
+ 2024-05-23 04:55:57,925 INFO SenderThread:1763 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/requirements.txt requirements.txt
125
+ 2024-05-23 04:55:57,928 INFO SenderThread:1763 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/wandb-summary.json wandb-summary.json
126
+ 2024-05-23 04:55:57,930 INFO SenderThread:1763 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/wandb-metadata.json wandb-metadata.json
127
+ 2024-05-23 04:55:57,930 INFO SenderThread:1763 [dir_watcher.py:finish():402] scan save: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/config.yaml config.yaml
128
+ 2024-05-23 04:55:57,932 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 10
129
+ 2024-05-23 04:55:57,932 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer
130
+ 2024-05-23 04:55:57,932 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 10
131
+ 2024-05-23 04:55:57,933 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer
132
+ 2024-05-23 04:55:57,935 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 10
133
+ 2024-05-23 04:55:57,935 INFO SenderThread:1763 [file_pusher.py:finish():169] shutting down file pusher
134
+ 2024-05-23 04:55:58,082 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: poll_exit
135
+ 2024-05-23 04:55:58,083 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: poll_exit
136
+ 2024-05-23 04:55:58,178 INFO wandb-upload_0:1763 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/output.log
137
+ 2024-05-23 04:55:58,492 INFO wandb-upload_1:1763 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/requirements.txt
138
+ 2024-05-23 04:55:58,533 INFO wandb-upload_2:1763 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/wandb-summary.json
139
+ 2024-05-23 04:55:58,544 INFO wandb-upload_3:1763 [upload_job.py:push():130] Uploaded file /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/files/config.yaml
140
+ 2024-05-23 04:55:58,744 INFO Thread-11 (_thread_body):1763 [sender.py:transition_state():613] send defer: 11
141
+ 2024-05-23 04:55:58,744 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer
142
+ 2024-05-23 04:55:58,744 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 11
143
+ 2024-05-23 04:55:58,744 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer
144
+ 2024-05-23 04:55:58,744 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 11
145
+ 2024-05-23 04:55:58,744 INFO SenderThread:1763 [file_pusher.py:join():175] waiting for file pusher
146
+ 2024-05-23 04:55:58,745 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 12
147
+ 2024-05-23 04:55:58,745 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer
148
+ 2024-05-23 04:55:58,745 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 12
149
+ 2024-05-23 04:55:58,745 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer
150
+ 2024-05-23 04:55:58,745 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 12
151
+ 2024-05-23 04:55:58,745 INFO SenderThread:1763 [file_stream.py:finish():601] file stream finish called
152
+ 2024-05-23 04:55:58,805 INFO SenderThread:1763 [file_stream.py:finish():605] file stream finish is done
153
+ 2024-05-23 04:55:58,805 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 13
154
+ 2024-05-23 04:55:58,805 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer
155
+ 2024-05-23 04:55:58,805 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 13
156
+ 2024-05-23 04:55:58,805 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer
157
+ 2024-05-23 04:55:58,805 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 13
158
+ 2024-05-23 04:55:58,805 INFO SenderThread:1763 [sender.py:transition_state():613] send defer: 14
159
+ 2024-05-23 04:55:58,805 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: defer
160
+ 2024-05-23 04:55:58,805 INFO HandlerThread:1763 [handler.py:handle_request_defer():184] handle defer: 14
161
+ 2024-05-23 04:55:58,805 DEBUG SenderThread:1763 [sender.py:send():378] send: final
162
+ 2024-05-23 04:55:58,805 DEBUG SenderThread:1763 [sender.py:send():378] send: footer
163
+ 2024-05-23 04:55:58,805 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: defer
164
+ 2024-05-23 04:55:58,806 INFO SenderThread:1763 [sender.py:send_request_defer():609] handle sender defer: 14
165
+ 2024-05-23 04:55:58,806 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: poll_exit
166
+ 2024-05-23 04:55:58,806 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: poll_exit
167
+ 2024-05-23 04:55:58,806 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: server_info
168
+ 2024-05-23 04:55:58,806 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: get_summary
169
+ 2024-05-23 04:55:58,806 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: sampled_history
170
+ 2024-05-23 04:55:58,806 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: internal_messages
171
+ 2024-05-23 04:55:58,807 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: poll_exit
172
+ 2024-05-23 04:55:58,807 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: poll_exit
173
+ 2024-05-23 04:55:58,807 DEBUG SenderThread:1763 [sender.py:send_request():405] send_request: server_info
174
+ 2024-05-23 04:55:58,870 INFO MainThread:1763 [wandb_run.py:_footer_history_summary_info():3994] rendering history
175
+ 2024-05-23 04:55:58,870 INFO MainThread:1763 [wandb_run.py:_footer_history_summary_info():4026] rendering summary
176
+ 2024-05-23 04:55:58,870 INFO MainThread:1763 [wandb_run.py:_footer_sync_info():3953] logging synced files
177
+ 2024-05-23 04:55:58,870 DEBUG HandlerThread:1763 [handler.py:handle_request():158] handle_request: shutdown
178
+ 2024-05-23 04:55:58,870 INFO HandlerThread:1763 [handler.py:finish():882] shutting down handler
179
+ 2024-05-23 04:55:59,807 INFO WriterThread:1763 [datastore.py:close():296] close: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/run-cegnkp0e.wandb
180
+ 2024-05-23 04:55:59,870 INFO SenderThread:1763 [sender.py:finish():1545] shutting down sender
181
+ 2024-05-23 04:55:59,870 INFO SenderThread:1763 [file_pusher.py:finish():169] shutting down file pusher
182
+ 2024-05-23 04:55:59,870 INFO SenderThread:1763 [file_pusher.py:join():175] waiting for file pusher
lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/logs/debug.log ADDED
@@ -0,0 +1,29 @@
1
+ 2024-05-23 04:55:44,620 INFO MainThread:1608 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0
2
+ 2024-05-23 04:55:44,620 INFO MainThread:1608 [wandb_setup.py:_flush():76] Configure stats pid to 1608
3
+ 2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
4
+ 2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings
5
+ 2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
6
+ 2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
7
+ 2024-05-23 04:55:44,621 WARNING MainThread:1608 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
8
+ 2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
9
+ 2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_setup.py:_flush():76] Applying login settings: {}
10
+ 2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/logs/debug.log
11
+ 2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/logs/debug-internal.log
12
+ 2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_init.py:init():560] calling init triggers
13
+ 2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
14
+ config: {}
15
+ 2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_init.py:init():610] starting backend
16
+ 2024-05-23 04:55:44,621 INFO MainThread:1608 [wandb_init.py:init():614] setting up manager
17
+ 2024-05-23 04:55:44,625 INFO MainThread:1608 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
18
+ 2024-05-23 04:55:44,626 INFO MainThread:1608 [wandb_init.py:init():622] backend started and connected
19
+ 2024-05-23 04:55:44,629 INFO MainThread:1608 [wandb_init.py:init():711] updated telemetry
20
+ 2024-05-23 04:55:44,639 INFO MainThread:1608 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
21
+ 2024-05-23 04:55:44,916 INFO MainThread:1608 [wandb_run.py:_on_init():2396] communicating current version
22
+ 2024-05-23 04:55:45,031 INFO MainThread:1608 [wandb_run.py:_on_init():2405] got version response
23
+ 2024-05-23 04:55:45,031 INFO MainThread:1608 [wandb_init.py:init():795] starting run threads in backend
24
+ 2024-05-23 04:55:45,306 INFO MainThread:1608 [wandb_run.py:_console_start():2374] atexit reg
25
+ 2024-05-23 04:55:45,306 INFO MainThread:1608 [wandb_run.py:_redirect():2229] redirect: wrap_raw
26
+ 2024-05-23 04:55:45,307 INFO MainThread:1608 [wandb_run.py:_redirect():2294] Wrapping output streams.
27
+ 2024-05-23 04:55:45,307 INFO MainThread:1608 [wandb_run.py:_redirect():2319] Redirects installed.
28
+ 2024-05-23 04:55:45,308 INFO MainThread:1608 [wandb_init.py:init():838] run started, returning control to user process
29
+ 2024-05-23 04:55:59,871 WARNING MsgRouterThr:1608 [router.py:message_loop():77] message_loop has been closed
lm-evaluation-harness/wandb/run-20240523_045544-cegnkp0e/run-cegnkp0e.wandb ADDED
Binary file (6.27 kB)
 
lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/config.yaml ADDED
@@ -0,0 +1,33 @@
1
+ wandb_version: 1
2
+
3
+ _wandb:
4
+ desc: null
5
+ value:
6
+ python_version: 3.10.12
7
+ cli_version: 0.17.0
8
+ framework: huggingface
9
+ huggingface_version: 4.36.2
10
+ is_jupyter_run: false
11
+ is_kaggle_kernel: false
12
+ start_time: 1717058855
13
+ t:
14
+ 1:
15
+ - 1
16
+ - 5
17
+ - 11
18
+ - 49
19
+ - 51
20
+ - 53
21
+ - 55
22
+ - 71
23
+ - 98
24
+ - 100
25
+ 3:
26
+ - 13
27
+ - 23
28
+ 4: 3.10.12
29
+ 5: 0.17.0
30
+ 6: 4.36.2
31
+ 8:
32
+ - 5
33
+ 13: linux-x86_64
lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/output.log ADDED
@@ -0,0 +1,5 @@
1
+
2
+ 2024-05-30:08:47:36,676 INFO [__main__.py:251] Verbosity set to INFO
3
+ 2024-05-30:08:47:45,851 INFO [__main__.py:335] Selected Tasks: ['arc_easy', 'boolq', 'copa', 'mrpc', 'piqa', 'sst2', 'winogrande']
4
+ 2024-05-30:08:47:45,852 INFO [evaluator.py:131] Setting random seed to 0 | Setting numpy seed to 1234 | Setting torch manual seed to 1234
5
+ 2024-05-30:08:47:45,852 INFO [evaluator.py:177] Initializing hf model, with arguments: {'pretrained': '/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step10000', 'tokenizer': '/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/'}
lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/requirements.txt ADDED
@@ -0,0 +1,153 @@
1
+ DataProperty==1.0.1
2
+ GitPython==3.1.43
3
+ Jinja2==3.1.3
4
+ Markdown==3.6
5
+ MarkupSafe==2.1.5
6
+ Pillow-SIMD==7.0.0.post3
7
+ PyYAML==6.0
8
+ Werkzeug==3.0.2
9
+ absl-py==2.1.0
10
+ accelerate==0.30.1
11
+ aiohttp==3.9.4
12
+ aiosignal==1.3.1
13
+ async-timeout==4.0.3
14
+ attrs==23.2.0
15
+ av==9.2.0
16
+ cachetools==5.3.3
17
+ certifi==2024.2.2
18
+ cffi==1.15.1
19
+ cfgv==3.4.0
20
+ chardet==5.2.0
21
+ charset-normalizer==3.3.2
22
+ click==8.1.7
23
+ cmake==3.29.2
24
+ colorama==0.4.6
25
+ datasets==2.19.1
26
+ deepspeed==0.12.4+hpu.synapse.v1.15.1
27
+ dill==0.3.8
28
+ distlib==0.3.8
29
+ docker-pycreds==0.4.0
30
+ einops==0.8.0
31
+ evaluate==0.4.2
32
+ exceptiongroup==1.2.0
33
+ expecttest==0.2.1
34
+ filelock==3.13.4
35
+ frozenlist==1.4.1
36
+ fsspec==2024.3.1
37
+ gitdb==4.0.11
38
+ google-auth-oauthlib==0.4.6
39
+ google-auth==2.29.0
40
+ grpcio==1.62.1
41
+ habana-media-loader==1.15.1.15
42
+ habana-pyhlml==1.15.1.15
43
+ habana-torch-dataloader==1.15.1.15
44
+ habana-torch-plugin==1.15.1.15
45
+ habana_gpu_migration==1.15.1.15
46
+ habana_quantization_toolkit==1.15.1.15
47
+ hjson==3.1.0
48
+ huggingface-hub==0.23.2
49
+ identify==2.5.35
50
+ idna==3.7
51
+ iniconfig==2.0.0
52
+ joblib==1.4.2
53
+ jsonlines==4.0.0
54
+ lightning-habana==1.4.0
55
+ lightning-utilities==0.11.2
56
+ lightning==2.2.0.post0
57
+ lm_eval==0.4.2
58
+ lm_eval==0.4.2
59
+ lm_eval==0.4.2
60
+ lxml==5.2.2
61
+ mbstrdecoder==1.1.3
62
+ more-itertools==10.2.0
63
+ mpi4py==3.1.4
64
+ mpmath==1.3.0
65
+ multidict==6.0.5
66
+ multiprocess==0.70.16
67
+ networkx==3.3
68
+ ninja==1.11.1.1
69
+ nltk==3.8.1
70
+ nodeenv==1.8.0
71
+ numexpr==2.10.0
72
+ numpy==1.23.5
73
+ oauthlib==3.2.2
74
+ packaging==24.0
75
+ pandas==2.0.1
76
+ pathspec==0.12.1
77
+ pathvalidate==3.2.0
78
+ peft==0.11.1
79
+ perfetto==0.7.0
80
+ pip==22.0.2
81
+ pip==23.3.1
82
+ platformdirs==4.2.0
83
+ pluggy==1.4.0
84
+ portalocker==2.8.2
85
+ pre-commit==3.3.3
86
+ protobuf==3.20.3
87
+ psutil==5.9.8
88
+ py-cpuinfo==9.0.0
89
+ pyarrow-hotfix==0.6
90
+ pyarrow==16.1.0
91
+ pyasn1==0.6.0
92
+ pyasn1_modules==0.4.0
93
+ pybind11==2.10.4
94
+ pycparser==2.22
95
+ pydantic==1.10.13
96
+ pynvml==8.0.4
97
+ pytablewriter==1.2.0
98
+ pytest==8.1.1
99
+ python-dateutil==2.9.0.post0
100
+ pytorch-lightning==2.2.2
101
+ pytz==2024.1
102
+ regex==2023.5.5
103
+ requests-oauthlib==2.0.0
104
+ requests==2.31.0
105
+ rouge_score==0.1.2
106
+ rsa==4.9
107
+ sacrebleu==2.4.2
108
+ safetensors==0.4.3
109
+ scikit-learn==1.5.0
110
+ scipy==1.13.1
111
+ sentencepiece==0.2.0
112
+ sentry-sdk==2.3.1
113
+ setproctitle==1.3.3
114
+ setuptools==59.6.0
115
+ setuptools==69.5.1
116
+ six==1.16.0
117
+ smmap==5.0.1
118
+ sqlitedict==2.1.0
119
+ symengine==0.11.0
120
+ sympy==1.12
121
+ tabledata==1.3.3
122
+ tabulate==0.9.0
123
+ tcolorpy==0.1.6
124
+ tdqm==0.0.1
125
+ tensorboard-data-server==0.6.1
126
+ tensorboard-plugin-wit==1.8.1
127
+ tensorboard==2.11.2
128
+ threadpoolctl==3.5.0
129
+ tokenizers==0.15.2
130
+ tomli==2.0.1
131
+ torch==2.2.0a0+git8964477
132
+ torch_tb_profiler==0.4.0
133
+ torchaudio==2.2.0+08901ad
134
+ torchdata==0.7.1+5e6f7b7
135
+ torchmetrics==1.3.2
136
+ torchtext==0.17.0+400da5c
137
+ torchvision==0.17.0+b2383d4
138
+ tqdm-multiprocess==0.0.11
139
+ tqdm==4.66.2
140
+ transformers==4.36.2
141
+ typepy==1.3.2
142
+ typing_extensions==4.11.0
143
+ tzdata==2024.1
144
+ urllib3==1.26.18
145
+ virtualenv==20.25.1
146
+ wandb==0.17.0
147
+ wheel==0.37.1
148
+ wheel==0.43.0
149
+ word2number==1.1
150
+ xxhash==3.4.1
151
+ yamllint==1.35.1
152
+ yarl==1.9.4
153
+ zstandard==0.22.0
lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/wandb-metadata.json ADDED
@@ -0,0 +1,850 @@
1
+ {
2
+ "os": "Linux-5.15.0-92-generic-x86_64-with-glibc2.35",
3
+ "python": "3.10.12",
4
+ "heartbeatAt": "2024-05-30T08:47:36.464779",
5
+ "startedAt": "2024-05-30T08:47:35.929688",
6
+ "docker": null,
7
+ "cuda": null,
8
+ "args": [
9
+ "--model",
10
+ "hf",
11
+ "--model_args",
12
+ "pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step10000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/",
13
+ "--tasks",
14
+ "winogrande,sst2,mrpc,arc_easy,copa,piqa,boolq",
15
+ "--batch_size",
16
+ "auto",
17
+ "--wandb_args",
18
+ "project=english-eval,group=exp2,name=global_step10000"
19
+ ],
20
+ "state": "running",
21
+ "program": "-m lm_eval.__main__",
22
+ "codePathLocal": null,
23
+ "git": {
24
+ "remote": "https://github.com/EleutherAI/lm-evaluation-harness",
25
+ "commit": null
26
+ },
27
+ "email": null,
28
+ "root": "/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness",
29
+ "host": "peacock-evaluation-debug-5-worker-0",
30
+ "username": "root",
31
+ "executable": "/usr/bin/python3",
32
+ "cpu_count": 80,
33
+ "cpu_count_logical": 160,
34
+ "cpu_freq": {
35
+ "current": 2327.43190625,
36
+ "min": 800.0,
37
+ "max": 3400.0
38
+ },
39
+ "cpu_freq_per_core": [
40
+ {
41
+ "current": 3400.0,
42
+ "min": 800.0,
43
+ "max": 3400.0
44
+ },
45
+ {
46
+ "current": 3400.0,
47
+ "min": 800.0,
48
+ "max": 3400.0
49
+ },
50
+ {
51
+ "current": 2300.0,
52
+ "min": 800.0,
53
+ "max": 3400.0
54
+ },
55
+ {
56
+ "current": 2300.0,
57
+ "min": 800.0,
58
+ "max": 3400.0
59
+ },
60
+ {
61
+ "current": 2300.0,
62
+ "min": 800.0,
63
+ "max": 3400.0
64
+ },
65
+ {
66
+ "current": 2300.0,
67
+ "min": 800.0,
68
+ "max": 3400.0
69
+ },
70
+ {
71
+ "current": 2300.0,
72
+ "min": 800.0,
73
+ "max": 3400.0
74
+ },
75
+ {
76
+ "current": 2300.0,
77
+ "min": 800.0,
78
+ "max": 3400.0
79
+ },
80
+ {
81
+ "current": 2300.0,
82
+ "min": 800.0,
83
+ "max": 3400.0
84
+ },
85
+ {
86
+ "current": 2300.0,
87
+ "min": 800.0,
88
+ "max": 3400.0
89
+ },
90
+ {
91
+ "current": 2300.0,
92
+ "min": 800.0,
93
+ "max": 3400.0
94
+ },
95
+ {
96
+ "current": 2300.0,
97
+ "min": 800.0,
98
+ "max": 3400.0
99
+ },
100
+ {
101
+ "current": 2300.0,
102
+ "min": 800.0,
103
+ "max": 3400.0
104
+ },
105
+ {
106
+ "current": 2300.0,
107
+ "min": 800.0,
108
+ "max": 3400.0
109
+ },
110
+ {
111
+ "current": 2300.0,
112
+ "min": 800.0,
113
+ "max": 3400.0
114
+ },
115
+ {
116
+ "current": 2300.0,
117
+ "min": 800.0,
118
+ "max": 3400.0
119
+ },
120
+ {
121
+ "current": 2300.0,
122
+ "min": 800.0,
123
+ "max": 3400.0
124
+ },
125
+ {
126
+ "current": 2300.0,
127
+ "min": 800.0,
128
+ "max": 3400.0
129
+ },
130
+ {
131
+ "current": 2300.0,
132
+ "min": 800.0,
133
+ "max": 3400.0
134
+ },
135
+ {
136
+ "current": 2300.0,
137
+ "min": 800.0,
138
+ "max": 3400.0
139
+ },
140
+ {
141
+ "current": 2300.0,
142
+ "min": 800.0,
143
+ "max": 3400.0
144
+ },
145
+ {
146
+ "current": 2300.0,
147
+ "min": 800.0,
148
+ "max": 3400.0
149
+ },
150
+ {
151
+ "current": 2300.0,
152
+ "min": 800.0,
153
+ "max": 3400.0
154
+ },
155
+ {
156
+ "current": 2300.0,
157
+ "min": 800.0,
158
+ "max": 3400.0
159
+ },
160
+ {
161
+ "current": 2300.0,
162
+ "min": 800.0,
163
+ "max": 3400.0
164
+ },
165
+ {
166
+ "current": 2300.0,
167
+ "min": 800.0,
168
+ "max": 3400.0
169
+ },
170
+ {
171
+ "current": 2300.0,
172
+ "min": 800.0,
173
+ "max": 3400.0
174
+ },
175
+ {
176
+ "current": 2300.0,
177
+ "min": 800.0,
178
+ "max": 3400.0
179
+ },
180
+ {
181
+ "current": 2300.0,
182
+ "min": 800.0,
183
+ "max": 3400.0
184
+ },
185
+ {
186
+ "current": 2300.0,
187
+ "min": 800.0,
188
+ "max": 3400.0
189
+ },
190
+ {
191
+ "current": 2300.0,
192
+ "min": 800.0,
193
+ "max": 3400.0
194
+ },
195
+ {
196
+ "current": 2300.0,
197
+ "min": 800.0,
198
+ "max": 3400.0
199
+ },
200
+ {
201
+ "current": 2300.0,
202
+ "min": 800.0,
203
+ "max": 3400.0
204
+ },
205
+ {
206
+ "current": 2300.0,
207
+ "min": 800.0,
208
+ "max": 3400.0
209
+ },
210
+ {
211
+ "current": 2300.0,
212
+ "min": 800.0,
213
+ "max": 3400.0
214
+ },
215
+ {
216
+ "current": 2300.0,
217
+ "min": 800.0,
218
+ "max": 3400.0
219
+ },
220
+ {
221
+ "current": 2300.0,
222
+ "min": 800.0,
223
+ "max": 3400.0
224
+ },
225
+ {
226
+ "current": 2300.0,
227
+ "min": 800.0,
228
+ "max": 3400.0
229
+ },
230
+ {
231
+ "current": 2300.0,
232
+ "min": 800.0,
233
+ "max": 3400.0
234
+ },
235
+ {
236
+ "current": 2300.0,
237
+ "min": 800.0,
238
+ "max": 3400.0
239
+ },
240
+ {
241
+ "current": 3400.0,
242
+ "min": 800.0,
243
+ "max": 3400.0
244
+ },
245
+ {
246
+ "current": 2300.0,
247
+ "min": 800.0,
248
+ "max": 3400.0
249
+ },
250
+ {
251
+ "current": 2300.0,
252
+ "min": 800.0,
253
+ "max": 3400.0
254
+ },
255
+ {
256
+ "current": 2300.0,
257
+ "min": 800.0,
258
+ "max": 3400.0
259
+ },
260
+ {
261
+ "current": 2300.0,
262
+ "min": 800.0,
263
+ "max": 3400.0
264
+ },
265
+ {
266
+ "current": 2300.0,
267
+ "min": 800.0,
268
+ "max": 3400.0
269
+ },
270
+ {
271
+ "current": 2300.0,
272
+ "min": 800.0,
273
+ "max": 3400.0
274
+ },
275
+ {
276
+ "current": 2300.0,
277
+ "min": 800.0,
278
+ "max": 3400.0
279
+ },
280
+ {
281
+ "current": 2300.0,
282
+ "min": 800.0,
283
+ "max": 3400.0
284
+ },
285
+ {
286
+ "current": 2300.0,
287
+ "min": 800.0,
288
+ "max": 3400.0
289
+ },
290
+ {
291
+ "current": 2300.0,
292
+ "min": 800.0,
293
+ "max": 3400.0
294
+ },
295
+ {
296
+ "current": 2300.0,
297
+ "min": 800.0,
298
+ "max": 3400.0
299
+ },
300
+ {
301
+ "current": 2300.0,
302
+ "min": 800.0,
303
+ "max": 3400.0
304
+ },
305
+ {
306
+ "current": 2300.0,
307
+ "min": 800.0,
308
+ "max": 3400.0
309
+ },
310
+ {
311
+ "current": 2300.0,
312
+ "min": 800.0,
313
+ "max": 3400.0
314
+ },
315
+ {
316
+ "current": 2300.0,
317
+ "min": 800.0,
318
+ "max": 3400.0
319
+ },
320
+ {
321
+ "current": 2300.0,
322
+ "min": 800.0,
323
+ "max": 3400.0
324
+ },
325
+ {
326
+ "current": 2300.0,
327
+ "min": 800.0,
328
+ "max": 3400.0
329
+ },
330
+ {
331
+ "current": 2300.0,
332
+ "min": 800.0,
333
+ "max": 3400.0
334
+ },
335
+ {
336
+ "current": 2300.0,
337
+ "min": 800.0,
338
+ "max": 3400.0
339
+ },
340
+ {
341
+ "current": 2300.0,
342
+ "min": 800.0,
343
+ "max": 3400.0
344
+ },
345
+ {
346
+ "current": 2300.0,
347
+ "min": 800.0,
348
+ "max": 3400.0
349
+ },
350
+ {
351
+ "current": 2300.0,
352
+ "min": 800.0,
353
+ "max": 3400.0
354
+ },
355
+ {
356
+ "current": 2300.0,
357
+ "min": 800.0,
358
+ "max": 3400.0
359
+ },
360
+ {
361
+ "current": 2300.0,
362
+ "min": 800.0,
363
+ "max": 3400.0
364
+ },
365
+ {
366
+ "current": 2300.0,
367
+ "min": 800.0,
368
+ "max": 3400.0
369
+ },
370
+ {
371
+ "current": 2300.0,
372
+ "min": 800.0,
373
+ "max": 3400.0
374
+ },
375
+ {
376
+ "current": 2300.0,
377
+ "min": 800.0,
378
+ "max": 3400.0
379
+ },
380
+ {
381
+ "current": 2300.0,
382
+ "min": 800.0,
383
+ "max": 3400.0
384
+ },
385
+ {
386
+ "current": 2300.0,
387
+ "min": 800.0,
388
+ "max": 3400.0
389
+ },
390
+ {
391
+ "current": 2300.0,
392
+ "min": 800.0,
393
+ "max": 3400.0
394
+ },
395
+ {
396
+ "current": 2300.0,
397
+ "min": 800.0,
398
+ "max": 3400.0
399
+ },
400
+ {
401
+ "current": 2300.0,
402
+ "min": 800.0,
403
+ "max": 3400.0
404
+ },
405
+ {
406
+ "current": 2300.0,
407
+ "min": 800.0,
408
+ "max": 3400.0
409
+ },
410
+ {
411
+ "current": 2300.0,
412
+ "min": 800.0,
413
+ "max": 3400.0
414
+ },
415
+ {
416
+ "current": 2300.0,
417
+ "min": 800.0,
418
+ "max": 3400.0
419
+ },
420
+ {
421
+ "current": 2300.0,
422
+ "min": 800.0,
423
+ "max": 3400.0
424
+ },
425
+ {
426
+ "current": 2300.0,
427
+ "min": 800.0,
428
+ "max": 3400.0
429
+ },
430
+ {
431
+ "current": 2300.0,
432
+ "min": 800.0,
433
+ "max": 3400.0
434
+ },
435
+ {
436
+ "current": 2300.0,
437
+ "min": 800.0,
438
+ "max": 3400.0
439
+ },
440
+ {
441
+ "current": 3399.997,
442
+ "min": 800.0,
443
+ "max": 3400.0
444
+ },
445
+ {
446
+ "current": 2300.0,
447
+ "min": 800.0,
448
+ "max": 3400.0
449
+ },
450
+ {
451
+ "current": 2300.0,
452
+ "min": 800.0,
453
+ "max": 3400.0
454
+ },
455
+ {
456
+ "current": 2300.0,
457
+ "min": 800.0,
458
+ "max": 3400.0
459
+ },
460
+ {
461
+ "current": 2300.0,
462
+ "min": 800.0,
463
+ "max": 3400.0
464
+ },
465
+ {
466
+ "current": 2300.0,
467
+ "min": 800.0,
468
+ "max": 3400.0
469
+ },
470
+ {
471
+ "current": 2300.0,
472
+ "min": 800.0,
473
+ "max": 3400.0
474
+ },
475
+ {
476
+ "current": 2300.0,
477
+ "min": 800.0,
478
+ "max": 3400.0
479
+ },
480
+ {
481
+ "current": 2300.0,
482
+ "min": 800.0,
483
+ "max": 3400.0
484
+ },
485
+ {
486
+ "current": 2300.0,
487
+ "min": 800.0,
488
+ "max": 3400.0
489
+ },
490
+ {
491
+ "current": 2300.0,
492
+ "min": 800.0,
493
+ "max": 3400.0
494
+ },
495
+ {
496
+ "current": 2300.0,
497
+ "min": 800.0,
498
+ "max": 3400.0
499
+ },
500
+ {
501
+ "current": 2300.0,
502
+ "min": 800.0,
503
+ "max": 3400.0
504
+ },
505
+ {
506
+ "current": 2300.0,
507
+ "min": 800.0,
508
+ "max": 3400.0
509
+ },
510
+ {
511
+ "current": 2300.0,
512
+ "min": 800.0,
513
+ "max": 3400.0
514
+ },
515
+ {
516
+ "current": 2300.0,
517
+ "min": 800.0,
518
+ "max": 3400.0
519
+ },
520
+ {
521
+ "current": 2300.0,
522
+ "min": 800.0,
523
+ "max": 3400.0
524
+ },
525
+ {
526
+ "current": 2300.0,
527
+ "min": 800.0,
528
+ "max": 3400.0
529
+ },
530
+ {
531
+ "current": 2300.0,
532
+ "min": 800.0,
533
+ "max": 3400.0
534
+ },
535
+ {
536
+ "current": 2300.0,
537
+ "min": 800.0,
538
+ "max": 3400.0
539
+ },
540
+ {
541
+ "current": 2300.0,
542
+ "min": 800.0,
543
+ "max": 3400.0
544
+ },
545
+ {
546
+ "current": 2300.0,
547
+ "min": 800.0,
548
+ "max": 3400.0
549
+ },
550
+ {
551
+ "current": 2300.0,
552
+ "min": 800.0,
553
+ "max": 3400.0
554
+ },
555
+ {
556
+ "current": 2300.0,
557
+ "min": 800.0,
558
+ "max": 3400.0
559
+ },
560
+ {
561
+ "current": 2300.0,
562
+ "min": 800.0,
563
+ "max": 3400.0
564
+ },
565
+ {
566
+ "current": 2300.0,
567
+ "min": 800.0,
568
+ "max": 3400.0
569
+ },
570
+ {
571
+ "current": 2300.0,
572
+ "min": 800.0,
573
+ "max": 3400.0
574
+ },
575
+ {
576
+ "current": 2300.0,
577
+ "min": 800.0,
578
+ "max": 3400.0
579
+ },
580
+ {
581
+ "current": 2300.0,
582
+ "min": 800.0,
583
+ "max": 3400.0
584
+ },
585
+ {
586
+ "current": 2300.0,
587
+ "min": 800.0,
588
+ "max": 3400.0
589
+ },
590
+ {
591
+ "current": 2300.0,
592
+ "min": 800.0,
593
+ "max": 3400.0
594
+ },
595
+ {
596
+ "current": 2300.0,
597
+ "min": 800.0,
598
+ "max": 3400.0
599
+ },
600
+ {
601
+ "current": 2300.0,
602
+ "min": 800.0,
603
+ "max": 3400.0
604
+ },
605
+ {
606
+ "current": 2300.0,
607
+ "min": 800.0,
608
+ "max": 3400.0
609
+ },
610
+ {
611
+ "current": 2300.0,
612
+ "min": 800.0,
613
+ "max": 3400.0
614
+ },
615
+ {
616
+ "current": 2300.0,
617
+ "min": 800.0,
618
+ "max": 3400.0
619
+ },
620
+ {
621
+ "current": 2300.0,
622
+ "min": 800.0,
623
+ "max": 3400.0
624
+ },
625
+ {
626
+ "current": 2300.0,
627
+ "min": 800.0,
628
+ "max": 3400.0
629
+ },
630
+ {
631
+ "current": 2300.0,
632
+ "min": 800.0,
633
+ "max": 3400.0
634
+ },
635
+ {
636
+ "current": 2300.0,
637
+ "min": 800.0,
638
+ "max": 3400.0
639
+ },
640
+ {
641
+ "current": 2300.0,
642
+ "min": 800.0,
643
+ "max": 3400.0
644
+ },
645
+ {
646
+ "current": 2300.0,
647
+ "min": 800.0,
648
+ "max": 3400.0
649
+ },
650
+ {
651
+ "current": 2300.0,
652
+ "min": 800.0,
653
+ "max": 3400.0
654
+ },
655
+ {
656
+ "current": 2300.0,
657
+ "min": 800.0,
658
+ "max": 3400.0
659
+ },
660
+ {
661
+ "current": 2300.0,
662
+ "min": 800.0,
663
+ "max": 3400.0
664
+ },
665
+ {
666
+ "current": 2300.0,
667
+ "min": 800.0,
668
+ "max": 3400.0
669
+ },
670
+ {
671
+ "current": 2300.0,
672
+ "min": 800.0,
673
+ "max": 3400.0
674
+ },
675
+ {
676
+ "current": 2300.0,
677
+ "min": 800.0,
678
+ "max": 3400.0
679
+ },
680
+ {
681
+ "current": 2300.0,
682
+ "min": 800.0,
683
+ "max": 3400.0
684
+ },
685
+ {
686
+ "current": 2300.0,
687
+ "min": 800.0,
688
+ "max": 3400.0
689
+ },
690
+ {
691
+ "current": 2300.0,
692
+ "min": 800.0,
693
+ "max": 3400.0
694
+ },
695
+ {
696
+ "current": 2300.0,
697
+ "min": 800.0,
698
+ "max": 3400.0
699
+ },
700
+ {
701
+ "current": 2300.0,
702
+ "min": 800.0,
703
+ "max": 3400.0
704
+ },
705
+ {
706
+ "current": 2300.0,
707
+ "min": 800.0,
708
+ "max": 3400.0
709
+ },
710
+ {
711
+ "current": 2300.0,
712
+ "min": 800.0,
713
+ "max": 3400.0
714
+ },
715
+ {
716
+ "current": 2300.0,
717
+ "min": 800.0,
718
+ "max": 3400.0
719
+ },
720
+ {
721
+ "current": 2300.0,
722
+ "min": 800.0,
723
+ "max": 3400.0
724
+ },
725
+ {
726
+ "current": 2300.0,
727
+ "min": 800.0,
728
+ "max": 3400.0
729
+ },
730
+ {
731
+ "current": 2300.0,
732
+ "min": 800.0,
733
+ "max": 3400.0
734
+ },
735
+ {
736
+ "current": 2300.0,
737
+ "min": 800.0,
738
+ "max": 3400.0
739
+ },
740
+ {
741
+ "current": 2300.0,
742
+ "min": 800.0,
743
+ "max": 3400.0
744
+ },
745
+ {
746
+ "current": 2300.0,
747
+ "min": 800.0,
748
+ "max": 3400.0
749
+ },
750
+ {
751
+ "current": 2300.0,
752
+ "min": 800.0,
753
+ "max": 3400.0
754
+ },
755
+ {
756
+ "current": 2300.0,
757
+ "min": 800.0,
758
+ "max": 3400.0
759
+ },
760
+ {
761
+ "current": 2300.0,
762
+ "min": 800.0,
763
+ "max": 3400.0
764
+ },
765
+ {
766
+ "current": 2300.0,
767
+ "min": 800.0,
768
+ "max": 3400.0
769
+ },
770
+ {
771
+ "current": 2300.0,
772
+ "min": 800.0,
773
+ "max": 3400.0
774
+ },
775
+ {
776
+ "current": 2300.0,
777
+ "min": 800.0,
778
+ "max": 3400.0
779
+ },
780
+ {
781
+ "current": 2300.0,
782
+ "min": 800.0,
783
+ "max": 3400.0
784
+ },
785
+ {
786
+ "current": 2300.0,
787
+ "min": 800.0,
788
+ "max": 3400.0
789
+ },
790
+ {
791
+ "current": 2300.0,
792
+ "min": 800.0,
793
+ "max": 3400.0
794
+ },
795
+ {
796
+ "current": 2300.0,
797
+ "min": 800.0,
798
+ "max": 3400.0
799
+ },
800
+ {
801
+ "current": 2300.0,
802
+ "min": 800.0,
803
+ "max": 3400.0
804
+ },
805
+ {
806
+ "current": 2300.0,
807
+ "min": 800.0,
808
+ "max": 3400.0
809
+ },
810
+ {
811
+ "current": 2300.0,
812
+ "min": 800.0,
813
+ "max": 3400.0
814
+ },
815
+ {
816
+ "current": 2300.0,
817
+ "min": 800.0,
818
+ "max": 3400.0
819
+ },
820
+ {
821
+ "current": 2300.0,
822
+ "min": 800.0,
823
+ "max": 3400.0
824
+ },
825
+ {
826
+ "current": 2300.0,
827
+ "min": 800.0,
828
+ "max": 3400.0
829
+ },
830
+ {
831
+ "current": 2300.0,
832
+ "min": 800.0,
833
+ "max": 3400.0
834
+ },
835
+ {
836
+ "current": 2300.0,
837
+ "min": 800.0,
838
+ "max": 3400.0
839
+ }
840
+ ],
841
+ "disk": {
842
+ "/": {
843
+ "total": 877.6341285705566,
844
+ "used": 172.39404296875
845
+ }
846
+ },
847
+ "memory": {
848
+ "total": 1007.4379730224609
849
+ }
850
+ }
lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/logs/debug-internal.log ADDED
@@ -0,0 +1,49 @@
1
+ 2024-05-30 08:47:35,952 INFO StreamThr :904 [internal.py:wandb_internal():85] W&B internal server running at pid: 904, started at: 2024-05-30 08:47:35.950613
2
+ 2024-05-30 08:47:35,956 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: status
3
+ 2024-05-30 08:47:35,958 INFO WriterThread:904 [datastore.py:open_for_write():87] open: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/run-wtsee6ig.wandb
4
+ 2024-05-30 08:47:35,960 DEBUG SenderThread:904 [sender.py:send():378] send: header
5
+ 2024-05-30 08:47:35,963 DEBUG SenderThread:904 [sender.py:send():378] send: run
6
+ 2024-05-30 08:47:36,265 INFO SenderThread:904 [dir_watcher.py:__init__():211] watching files in: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files
7
+ 2024-05-30 08:47:36,265 INFO SenderThread:904 [sender.py:_start_run_threads():1123] run started: wtsee6ig with start time 1717058855.951258
8
+ 2024-05-30 08:47:36,269 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: check_version
9
+ 2024-05-30 08:47:36,269 DEBUG SenderThread:904 [sender.py:send_request():405] send_request: check_version
10
+ 2024-05-30 08:47:36,386 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: run_start
11
+ 2024-05-30 08:47:36,389 DEBUG HandlerThread:904 [system_info.py:__init__():26] System info init
12
+ 2024-05-30 08:47:36,389 DEBUG HandlerThread:904 [system_info.py:__init__():41] System info init done
13
+ 2024-05-30 08:47:36,389 INFO HandlerThread:904 [system_monitor.py:start():194] Starting system monitor
14
+ 2024-05-30 08:47:36,389 INFO SystemMonitor:904 [system_monitor.py:_start():158] Starting system asset monitoring threads
15
+ 2024-05-30 08:47:36,389 INFO HandlerThread:904 [system_monitor.py:probe():214] Collecting system info
16
+ 2024-05-30 08:47:36,396 INFO SystemMonitor:904 [interfaces.py:start():188] Started cpu monitoring
17
+ 2024-05-30 08:47:36,397 INFO SystemMonitor:904 [interfaces.py:start():188] Started disk monitoring
18
+ 2024-05-30 08:47:36,402 INFO SystemMonitor:904 [interfaces.py:start():188] Started memory monitoring
19
+ 2024-05-30 08:47:36,403 INFO SystemMonitor:904 [interfaces.py:start():188] Started network monitoring
20
+ 2024-05-30 08:47:36,464 DEBUG HandlerThread:904 [system_info.py:probe():150] Probing system
21
+ 2024-05-30 08:47:36,468 DEBUG HandlerThread:904 [system_info.py:_probe_git():135] Probing git
22
+ 2024-05-30 08:47:36,477 ERROR HandlerThread:904 [gitlib.py:root():92] git root error: Cmd('git') failed due to: exit code(128)
23
+ cmdline: git rev-parse --show-toplevel
24
+ stderr: 'fatal: detected dubious ownership in repository at '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
25
+ To add an exception for this directory, call:
26
+
27
+ git config --global --add safe.directory /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness'
28
+ 2024-05-30 08:47:36,477 DEBUG HandlerThread:904 [system_info.py:_probe_git():143] Probing git done
29
+ 2024-05-30 08:47:36,477 DEBUG HandlerThread:904 [system_info.py:probe():198] Probing system done
30
+ 2024-05-30 08:47:36,478 DEBUG HandlerThread:904 [system_monitor.py:probe():223] {'os': 'Linux-5.15.0-92-generic-x86_64-with-glibc2.35', 'python': '3.10.12', 'heartbeatAt': '2024-05-30T08:47:36.464779', 'startedAt': '2024-05-30T08:47:35.929688', 'docker': None, 'cuda': None, 'args': ('--model', 'hf', '--model_args', 'pretrained=/mnt/weka/peacock/experiments/llama/eval/checkpoint-english/llamav2-3b/hf/global_step10000,tokenizer=/mnt/weka/peacock/tokenization/trained-tokenizer/enhiben_50k_hf/', '--tasks', 'winogrande,sst2,mrpc,arc_easy,copa,piqa,boolq', '--batch_size', 'auto', '--wandb_args', 'project=english-eval,group=exp2,name=global_step10000'), 'state': 'running', 'program': '-m lm_eval.__main__', 'codePathLocal': None, 'git': {'remote': 'https://github.com/EleutherAI/lm-evaluation-harness', 'commit': None}, 'email': None, 'root': '/mnt/weka/peacock/idc/cronscript/lm-evaluation-harness', 'host': 'peacock-evaluation-debug-5-worker-0', 'username': 'root', 'executable': '/usr/bin/python3', 'cpu_count': 80, 'cpu_count_logical': 160, 'cpu_freq': {'current': 2327.43190625, 'min': 800.0, 'max': 3400.0}, 'cpu_freq_per_core': [{'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3400.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 3399.997, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 
2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}, {'current': 2300.0, 'min': 800.0, 'max': 3400.0}], 'disk': {'/': {'total': 877.6341285705566, 'used': 172.39404296875}}, 'memory': {'total': 1007.4379730224609}}
31
+ 2024-05-30 08:47:36,478 INFO HandlerThread:904 [system_monitor.py:probe():224] Finished collecting system info
32
+ 2024-05-30 08:47:36,478 INFO HandlerThread:904 [system_monitor.py:probe():227] Publishing system info
33
+ 2024-05-30 08:47:36,482 INFO HandlerThread:904 [system_monitor.py:probe():229] Finished publishing system info
34
+ 2024-05-30 08:47:36,489 DEBUG SenderThread:904 [sender.py:send():378] send: files
35
+ 2024-05-30 08:47:36,489 INFO SenderThread:904 [sender.py:_save_file():1389] saving file wandb-metadata.json with policy now
36
+ 2024-05-30 08:47:36,671 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: python_packages
37
+ 2024-05-30 08:47:36,671 DEBUG SenderThread:904 [sender.py:send_request():405] send_request: python_packages
38
+ 2024-05-30 08:47:36,673 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: stop_status
39
+ 2024-05-30 08:47:36,673 DEBUG SenderThread:904 [sender.py:send_request():405] send_request: stop_status
40
+ 2024-05-30 08:47:36,785 DEBUG SenderThread:904 [sender.py:send():378] send: telemetry
41
+ 2024-05-30 08:47:37,119 INFO wandb-upload_0:904 [upload_job.py:push():130] Uploaded file /tmp/tmp7qct_09uwandb/yu5q9u35-wandb-metadata.json
42
+ 2024-05-30 08:47:37,267 INFO Thread-12 :904 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/wandb-metadata.json
43
+ 2024-05-30 08:47:37,267 INFO Thread-12 :904 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/output.log
44
+ 2024-05-30 08:47:37,268 INFO Thread-12 :904 [dir_watcher.py:_on_file_created():271] file/dir created: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/requirements.txt
45
+ 2024-05-30 08:47:39,267 INFO Thread-12 :904 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/output.log
46
+ 2024-05-30 08:47:41,809 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: status_report
47
+ 2024-05-30 08:47:46,853 DEBUG HandlerThread:904 [handler.py:handle_request():158] handle_request: status_report
48
+ 2024-05-30 08:47:47,274 INFO Thread-12 :904 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/output.log
49
+ 2024-05-30 08:47:49,283 INFO Thread-12 :904 [dir_watcher.py:_on_file_modified():288] file/dir modified: /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/files/output.log
lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/logs/debug.log ADDED
@@ -0,0 +1,28 @@
1
+ 2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_setup.py:_flush():76] Current SDK version is 0.17.0
2
+ 2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_setup.py:_flush():76] Configure stats pid to 748
3
+ 2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_setup.py:_flush():76] Loading settings from /root/.config/wandb/settings
4
+ 2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_setup.py:_flush():76] Loading settings from /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/settings
5
+ 2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_setup.py:_flush():76] Loading settings from environment variables: {}
6
+ 2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_setup.py:_flush():76] Applying setup settings: {'_disable_service': False}
7
+ 2024-05-30 08:47:35,945 WARNING MainThread:748 [wandb_setup.py:_flush():76] Could not find program at -m lm_eval.__main__
8
+ 2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_setup.py:_flush():76] Inferring run settings from compute environment: {'program_relpath': None, 'program': '-m lm_eval.__main__'}
9
+ 2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_setup.py:_flush():76] Applying login settings: {}
10
+ 2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_init.py:_log_setup():520] Logging user logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/logs/debug.log
11
+ 2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_init.py:_log_setup():521] Logging internal logs to /mnt/weka/peacock/idc/cronscript/lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/logs/debug-internal.log
12
+ 2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_init.py:init():560] calling init triggers
13
+ 2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_init.py:init():567] wandb.init called with sweep_config: {}
14
+ config: {}
15
+ 2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_init.py:init():610] starting backend
16
+ 2024-05-30 08:47:35,945 INFO MainThread:748 [wandb_init.py:init():614] setting up manager
17
+ 2024-05-30 08:47:35,949 INFO MainThread:748 [backend.py:_multiprocessing_setup():105] multiprocessing start_methods=fork,spawn,forkserver, using: spawn
18
+ 2024-05-30 08:47:35,951 INFO MainThread:748 [wandb_init.py:init():622] backend started and connected
19
+ 2024-05-30 08:47:35,954 INFO MainThread:748 [wandb_init.py:init():711] updated telemetry
20
+ 2024-05-30 08:47:35,962 INFO MainThread:748 [wandb_init.py:init():744] communicating run to backend with 90.0 second timeout
21
+ 2024-05-30 08:47:36,268 INFO MainThread:748 [wandb_run.py:_on_init():2396] communicating current version
22
+ 2024-05-30 08:47:36,380 INFO MainThread:748 [wandb_run.py:_on_init():2405] got version response
23
+ 2024-05-30 08:47:36,380 INFO MainThread:748 [wandb_init.py:init():795] starting run threads in backend
24
+ 2024-05-30 08:47:36,672 INFO MainThread:748 [wandb_run.py:_console_start():2374] atexit reg
25
+ 2024-05-30 08:47:36,672 INFO MainThread:748 [wandb_run.py:_redirect():2229] redirect: wrap_raw
26
+ 2024-05-30 08:47:36,672 INFO MainThread:748 [wandb_run.py:_redirect():2294] Wrapping output streams.
27
+ 2024-05-30 08:47:36,672 INFO MainThread:748 [wandb_run.py:_redirect():2319] Redirects installed.
28
+ 2024-05-30 08:47:36,674 INFO MainThread:748 [wandb_init.py:init():838] run started, returning control to user process
lm-evaluation-harness/wandb/run-20240530_084735-wtsee6ig/run-wtsee6ig.wandb ADDED
File without changes
venv/lib/python3.10/site-packages/joblib/test/data/joblib_0.10.0_compressed_pickle_py34_np19.gz ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1abdb3ff5b555831f51f7ff00951e66a49277fc2aa787293f18cf8775be65023
3
+ size 794
venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__init__.py ADDED
@@ -0,0 +1,65 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
17
+
18
+
19
+ _import_structure = {
20
+ "configuration_audio_spectrogram_transformer": [
21
+ "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
22
+ "ASTConfig",
23
+ ],
24
+ "feature_extraction_audio_spectrogram_transformer": ["ASTFeatureExtractor"],
25
+ }
26
+
27
+ try:
28
+ if not is_torch_available():
29
+ raise OptionalDependencyNotAvailable()
30
+ except OptionalDependencyNotAvailable:
31
+ pass
32
+ else:
33
+ _import_structure["modeling_audio_spectrogram_transformer"] = [
34
+ "AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
35
+ "ASTForAudioClassification",
36
+ "ASTModel",
37
+ "ASTPreTrainedModel",
38
+ ]
39
+
40
+
41
+ if TYPE_CHECKING:
42
+ from .configuration_audio_spectrogram_transformer import (
43
+ AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
44
+ ASTConfig,
45
+ )
46
+ from .feature_extraction_audio_spectrogram_transformer import ASTFeatureExtractor
47
+
48
+ try:
49
+ if not is_torch_available():
50
+ raise OptionalDependencyNotAvailable()
51
+ except OptionalDependencyNotAvailable:
52
+ pass
53
+ else:
54
+ from .modeling_audio_spectrogram_transformer import (
55
+ AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
56
+ ASTForAudioClassification,
57
+ ASTModel,
58
+ ASTPreTrainedModel,
59
+ )
60
+
61
+
62
+ else:
63
+ import sys
64
+
65
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
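The `_LazyModule` wiring above resolves the torch-backed model classes only when they are first accessed, while `ASTConfig` and `ASTFeatureExtractor` import unconditionally. A minimal usage sketch, assuming `transformers` is installed and `torch` is available for the model classes:

    # Config and feature extractor have no torch dependency.
    from transformers import ASTConfig, ASTFeatureExtractor

    # Model classes are resolved lazily by _LazyModule and need torch at access time.
    from transformers import ASTForAudioClassification

    config = ASTConfig()
    model = ASTForAudioClassification(config)  # randomly initialised weights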
venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.14 kB).
venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/configuration_audio_spectrogram_transformer.cpython-310.pyc ADDED
Binary file (4.92 kB).
venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/convert_audio_spectrogram_transformer_original_to_pytorch.cpython-310.pyc ADDED
Binary file (7.58 kB).
venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/feature_extraction_audio_spectrogram_transformer.cpython-310.pyc ADDED
Binary file (8.07 kB).
venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/__pycache__/modeling_audio_spectrogram_transformer.cpython-310.pyc ADDED
Binary file (19.8 kB).
venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/configuration_audio_spectrogram_transformer.py ADDED
@@ -0,0 +1,124 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 Google AI and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ Audio Spectogram Transformer (AST) model configuration"""
16
+
17
+
18
+ from ...configuration_utils import PretrainedConfig
19
+ from ...utils import logging
20
+
21
+
22
+ logger = logging.get_logger(__name__)
23
+
24
+
25
+ from ..deprecated._archive_maps import AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
26
+
27
+
28
+ class ASTConfig(PretrainedConfig):
29
+ r"""
30
+ This is the configuration class to store the configuration of a [`ASTModel`]. It is used to instantiate an AST
31
+ model according to the specified arguments, defining the model architecture. Instantiating a configuration with the
32
+ defaults will yield a similar configuration to that of the AST
33
+ [MIT/ast-finetuned-audioset-10-10-0.4593](https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593)
34
+ architecture.
35
+
36
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
37
+ documentation from [`PretrainedConfig`] for more information.
38
+
39
+ Args:
40
+ hidden_size (`int`, *optional*, defaults to 768):
41
+ Dimensionality of the encoder layers and the pooler layer.
42
+ num_hidden_layers (`int`, *optional*, defaults to 12):
43
+ Number of hidden layers in the Transformer encoder.
44
+ num_attention_heads (`int`, *optional*, defaults to 12):
45
+ Number of attention heads for each attention layer in the Transformer encoder.
46
+ intermediate_size (`int`, *optional*, defaults to 3072):
47
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
48
+ hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
49
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
50
+ `"relu"`, `"selu"` and `"gelu_new"` are supported.
51
+ hidden_dropout_prob (`float`, *optional*, defaults to 0.0):
52
+ The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
53
+ attention_probs_dropout_prob (`float`, *optional*, defaults to 0.0):
54
+ The dropout ratio for the attention probabilities.
55
+ initializer_range (`float`, *optional*, defaults to 0.02):
56
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
57
+ layer_norm_eps (`float`, *optional*, defaults to 1e-12):
58
+ The epsilon used by the layer normalization layers.
59
+ patch_size (`int`, *optional*, defaults to 16):
60
+ The size (resolution) of each patch.
61
+ qkv_bias (`bool`, *optional*, defaults to `True`):
62
+ Whether to add a bias to the queries, keys and values.
63
+ frequency_stride (`int`, *optional*, defaults to 10):
64
+ Frequency stride to use when patchifying the spectrograms.
65
+ time_stride (`int`, *optional*, defaults to 10):
66
+ Temporal stride to use when patchifying the spectrograms.
67
+ max_length (`int`, *optional*, defaults to 1024):
68
+ Temporal dimension of the spectrograms.
69
+ num_mel_bins (`int`, *optional*, defaults to 128):
70
+ Frequency dimension of the spectrograms (number of Mel-frequency bins).
71
+
72
+ Example:
73
+
74
+ ```python
75
+ >>> from transformers import ASTConfig, ASTModel
76
+
77
+ >>> # Initializing a AST MIT/ast-finetuned-audioset-10-10-0.4593 style configuration
78
+ >>> configuration = ASTConfig()
79
+
80
+ >>> # Initializing a model (with random weights) from the MIT/ast-finetuned-audioset-10-10-0.4593 style configuration
81
+ >>> model = ASTModel(configuration)
82
+
83
+ >>> # Accessing the model configuration
84
+ >>> configuration = model.config
85
+ ```"""
86
+
87
+ model_type = "audio-spectrogram-transformer"
88
+
89
+ def __init__(
90
+ self,
91
+ hidden_size=768,
92
+ num_hidden_layers=12,
93
+ num_attention_heads=12,
94
+ intermediate_size=3072,
95
+ hidden_act="gelu",
96
+ hidden_dropout_prob=0.0,
97
+ attention_probs_dropout_prob=0.0,
98
+ initializer_range=0.02,
99
+ layer_norm_eps=1e-12,
100
+ patch_size=16,
101
+ qkv_bias=True,
102
+ frequency_stride=10,
103
+ time_stride=10,
104
+ max_length=1024,
105
+ num_mel_bins=128,
106
+ **kwargs,
107
+ ):
108
+ super().__init__(**kwargs)
109
+
110
+ self.hidden_size = hidden_size
111
+ self.num_hidden_layers = num_hidden_layers
112
+ self.num_attention_heads = num_attention_heads
113
+ self.intermediate_size = intermediate_size
114
+ self.hidden_act = hidden_act
115
+ self.hidden_dropout_prob = hidden_dropout_prob
116
+ self.attention_probs_dropout_prob = attention_probs_dropout_prob
117
+ self.initializer_range = initializer_range
118
+ self.layer_norm_eps = layer_norm_eps
119
+ self.patch_size = patch_size
120
+ self.qkv_bias = qkv_bias
121
+ self.frequency_stride = frequency_stride
122
+ self.time_stride = time_stride
123
+ self.max_length = max_length
124
+ self.num_mel_bins = num_mel_bins
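The `patch_size`, `frequency_stride` and `time_stride` values above determine how many 16x16 patches the spectrogram is cut into, and therefore the encoder's sequence length. The modeling code derives this from the config; the arithmetic below is an illustrative sketch using the default values, not the canonical implementation:

    from transformers import ASTConfig

    config = ASTConfig()  # defaults: num_mel_bins=128, max_length=1024, patch_size=16, strides of 10

    # Number of patches along the frequency and time axes (standard conv output-size formula).
    freq_patches = (config.num_mel_bins - config.patch_size) // config.frequency_stride + 1  # 12
    time_patches = (config.max_length - config.patch_size) // config.time_stride + 1         # 101
    num_patches = freq_patches * time_patches                                                # 1212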
venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/convert_audio_spectrogram_transformer_original_to_pytorch.py ADDED
@@ -0,0 +1,279 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Convert Audio Spectrogram Transformer checkpoints from the original repository. URL: https://github.com/YuanGongND/ast"""
16
+
17
+
18
+ import argparse
19
+ import json
20
+ from pathlib import Path
21
+
22
+ import torch
23
+ import torchaudio
24
+ from datasets import load_dataset
25
+ from huggingface_hub import hf_hub_download
26
+
27
+ from transformers import ASTConfig, ASTFeatureExtractor, ASTForAudioClassification
28
+ from transformers.utils import logging
29
+
30
+
31
+ logging.set_verbosity_info()
32
+ logger = logging.get_logger(__name__)
33
+
34
+
35
+ def get_audio_spectrogram_transformer_config(model_name):
36
+ config = ASTConfig()
37
+
38
+ if "10-10" in model_name:
39
+ pass
40
+ elif "speech-commands" in model_name:
41
+ config.max_length = 128
42
+ elif "12-12" in model_name:
43
+ config.time_stride = 12
44
+ config.frequency_stride = 12
45
+ elif "14-14" in model_name:
46
+ config.time_stride = 14
47
+ config.frequency_stride = 14
48
+ elif "16-16" in model_name:
49
+ config.time_stride = 16
50
+ config.frequency_stride = 16
51
+ else:
52
+ raise ValueError("Model not supported")
53
+
54
+ repo_id = "huggingface/label-files"
55
+ if "speech-commands" in model_name:
56
+ config.num_labels = 35
57
+ filename = "speech-commands-v2-id2label.json"
58
+ else:
59
+ config.num_labels = 527
60
+ filename = "audioset-id2label.json"
61
+
62
+ id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
63
+ id2label = {int(k): v for k, v in id2label.items()}
64
+ config.id2label = id2label
65
+ config.label2id = {v: k for k, v in id2label.items()}
66
+
67
+ return config
68
+
69
+
70
+ def rename_key(name):
71
+ if "module.v" in name:
72
+ name = name.replace("module.v", "audio_spectrogram_transformer")
73
+ if "cls_token" in name:
74
+ name = name.replace("cls_token", "embeddings.cls_token")
75
+ if "dist_token" in name:
76
+ name = name.replace("dist_token", "embeddings.distillation_token")
77
+ if "pos_embed" in name:
78
+ name = name.replace("pos_embed", "embeddings.position_embeddings")
79
+ if "patch_embed.proj" in name:
80
+ name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
81
+ # transformer blocks
82
+ if "blocks" in name:
83
+ name = name.replace("blocks", "encoder.layer")
84
+ if "attn.proj" in name:
85
+ name = name.replace("attn.proj", "attention.output.dense")
86
+ if "attn" in name:
87
+ name = name.replace("attn", "attention.self")
88
+ if "norm1" in name:
89
+ name = name.replace("norm1", "layernorm_before")
90
+ if "norm2" in name:
91
+ name = name.replace("norm2", "layernorm_after")
92
+ if "mlp.fc1" in name:
93
+ name = name.replace("mlp.fc1", "intermediate.dense")
94
+ if "mlp.fc2" in name:
95
+ name = name.replace("mlp.fc2", "output.dense")
96
+ # final layernorm
97
+ if "audio_spectrogram_transformer.norm" in name:
98
+ name = name.replace("audio_spectrogram_transformer.norm", "audio_spectrogram_transformer.layernorm")
99
+ # classifier head
100
+ if "module.mlp_head.0" in name:
101
+ name = name.replace("module.mlp_head.0", "classifier.layernorm")
102
+ if "module.mlp_head.1" in name:
103
+ name = name.replace("module.mlp_head.1", "classifier.dense")
104
+
105
+ return name
106
+
107
+
108
+ def convert_state_dict(orig_state_dict, config):
109
+ for key in orig_state_dict.copy().keys():
110
+ val = orig_state_dict.pop(key)
111
+
112
+ if "qkv" in key:
113
+ key_split = key.split(".")
114
+ layer_num = int(key_split[3])
115
+ dim = config.hidden_size
116
+ if "weight" in key:
117
+ orig_state_dict[
118
+ f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.weight"
119
+ ] = val[:dim, :]
120
+ orig_state_dict[
121
+ f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.weight"
122
+ ] = val[dim : dim * 2, :]
123
+ orig_state_dict[
124
+ f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.weight"
125
+ ] = val[-dim:, :]
126
+ else:
127
+ orig_state_dict[
128
+ f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.query.bias"
129
+ ] = val[:dim]
130
+ orig_state_dict[
131
+ f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.key.bias"
132
+ ] = val[dim : dim * 2]
133
+ orig_state_dict[
134
+ f"audio_spectrogram_transformer.encoder.layer.{layer_num}.attention.attention.value.bias"
135
+ ] = val[-dim:]
136
+ else:
137
+ orig_state_dict[rename_key(key)] = val
138
+
139
+ return orig_state_dict
140
+
141
+
142
+ def remove_keys(state_dict):
143
+ ignore_keys = [
144
+ "module.v.head.weight",
145
+ "module.v.head.bias",
146
+ "module.v.head_dist.weight",
147
+ "module.v.head_dist.bias",
148
+ ]
149
+ for k in ignore_keys:
150
+ state_dict.pop(k, None)
151
+
152
+
153
+ @torch.no_grad()
154
+ def convert_audio_spectrogram_transformer_checkpoint(model_name, pytorch_dump_folder_path, push_to_hub=False):
155
+ """
156
+ Copy/paste/tweak model's weights to our Audio Spectrogram Transformer structure.
157
+ """
158
+ config = get_audio_spectrogram_transformer_config(model_name)
159
+
160
+ model_name_to_url = {
161
+ "ast-finetuned-audioset-10-10-0.4593": (
162
+ "https://www.dropbox.com/s/ca0b1v2nlxzyeb4/audioset_10_10_0.4593.pth?dl=1"
163
+ ),
164
+ "ast-finetuned-audioset-10-10-0.450": (
165
+ "https://www.dropbox.com/s/1tv0hovue1bxupk/audioset_10_10_0.4495.pth?dl=1"
166
+ ),
167
+ "ast-finetuned-audioset-10-10-0.448": (
168
+ "https://www.dropbox.com/s/6u5sikl4b9wo4u5/audioset_10_10_0.4483.pth?dl=1"
169
+ ),
170
+ "ast-finetuned-audioset-10-10-0.448-v2": (
171
+ "https://www.dropbox.com/s/kt6i0v9fvfm1mbq/audioset_10_10_0.4475.pth?dl=1"
172
+ ),
173
+ "ast-finetuned-audioset-12-12-0.447": (
174
+ "https://www.dropbox.com/s/snfhx3tizr4nuc8/audioset_12_12_0.4467.pth?dl=1"
175
+ ),
176
+ "ast-finetuned-audioset-14-14-0.443": (
177
+ "https://www.dropbox.com/s/z18s6pemtnxm4k7/audioset_14_14_0.4431.pth?dl=1"
178
+ ),
179
+ "ast-finetuned-audioset-16-16-0.442": (
180
+ "https://www.dropbox.com/s/mdsa4t1xmcimia6/audioset_16_16_0.4422.pth?dl=1"
181
+ ),
182
+ "ast-finetuned-speech-commands-v2": (
183
+ "https://www.dropbox.com/s/q0tbqpwv44pquwy/speechcommands_10_10_0.9812.pth?dl=1"
184
+ ),
185
+ }
186
+
187
+ # load original state_dict
188
+ checkpoint_url = model_name_to_url[model_name]
189
+ state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
190
+ # remove some keys
191
+ remove_keys(state_dict)
192
+ # rename some keys
193
+ new_state_dict = convert_state_dict(state_dict, config)
194
+
195
+ # load 🤗 model
196
+ model = ASTForAudioClassification(config)
197
+ model.eval()
198
+
199
+ model.load_state_dict(new_state_dict)
200
+
201
+ # verify outputs on dummy input
202
+ # source: https://github.com/YuanGongND/ast/blob/79e873b8a54d0a3b330dd522584ff2b9926cd581/src/run.py#L62
203
+ mean = -4.2677393 if "speech-commands" not in model_name else -6.845978
204
+ std = 4.5689974 if "speech-commands" not in model_name else 5.5654526
205
+ max_length = 1024 if "speech-commands" not in model_name else 128
206
+ feature_extractor = ASTFeatureExtractor(mean=mean, std=std, max_length=max_length)
207
+
208
+ if "speech-commands" in model_name:
209
+ dataset = load_dataset("speech_commands", "v0.02", split="validation")
210
+ waveform = dataset[0]["audio"]["array"]
211
+ else:
212
+ filepath = hf_hub_download(
213
+ repo_id="nielsr/audio-spectogram-transformer-checkpoint",
214
+ filename="sample_audio.flac",
215
+ repo_type="dataset",
216
+ )
217
+
218
+ waveform, _ = torchaudio.load(filepath)
219
+ waveform = waveform.squeeze().numpy()
220
+
221
+ inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
222
+
223
+ # forward pass
224
+ outputs = model(**inputs)
225
+ logits = outputs.logits
226
+
227
+ if model_name == "ast-finetuned-audioset-10-10-0.4593":
228
+ expected_slice = torch.tensor([-0.8760, -7.0042, -8.6602])
229
+ elif model_name == "ast-finetuned-audioset-10-10-0.450":
230
+ expected_slice = torch.tensor([-1.1986, -7.0903, -8.2718])
231
+ elif model_name == "ast-finetuned-audioset-10-10-0.448":
232
+ expected_slice = torch.tensor([-2.6128, -8.0080, -9.4344])
233
+ elif model_name == "ast-finetuned-audioset-10-10-0.448-v2":
234
+ expected_slice = torch.tensor([-1.5080, -7.4534, -8.8917])
235
+ elif model_name == "ast-finetuned-audioset-12-12-0.447":
236
+ expected_slice = torch.tensor([-0.5050, -6.5833, -8.0843])
237
+ elif model_name == "ast-finetuned-audioset-14-14-0.443":
238
+ expected_slice = torch.tensor([-0.3826, -7.0336, -8.2413])
239
+ elif model_name == "ast-finetuned-audioset-16-16-0.442":
240
+ expected_slice = torch.tensor([-1.2113, -6.9101, -8.3470])
241
+ elif model_name == "ast-finetuned-speech-commands-v2":
242
+ expected_slice = torch.tensor([6.1589, -8.0566, -8.7984])
243
+ else:
244
+ raise ValueError("Unknown model name")
245
+ if not torch.allclose(logits[0, :3], expected_slice, atol=1e-4):
246
+ raise ValueError("Logits don't match")
247
+ print("Looks ok!")
248
+
249
+ if pytorch_dump_folder_path is not None:
250
+ Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
251
+ print(f"Saving model {model_name} to {pytorch_dump_folder_path}")
252
+ model.save_pretrained(pytorch_dump_folder_path)
253
+ print(f"Saving feature extractor to {pytorch_dump_folder_path}")
254
+ feature_extractor.save_pretrained(pytorch_dump_folder_path)
255
+
256
+ if push_to_hub:
257
+ print("Pushing model and feature extractor to the hub...")
258
+ model.push_to_hub(f"MIT/{model_name}")
259
+ feature_extractor.push_to_hub(f"MIT/{model_name}")
260
+
261
+
262
+ if __name__ == "__main__":
263
+ parser = argparse.ArgumentParser()
264
+ # Required parameters
265
+ parser.add_argument(
266
+ "--model_name",
267
+ default="ast-finetuned-audioset-10-10-0.4593",
268
+ type=str,
269
+ help="Name of the Audio Spectrogram Transformer model you'd like to convert.",
270
+ )
271
+ parser.add_argument(
272
+ "--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model directory."
273
+ )
274
+ parser.add_argument(
275
+ "--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
276
+ )
277
+
278
+ args = parser.parse_args()
279
+ convert_audio_spectrogram_transformer_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
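Besides the argparse entry point, the conversion function can be called directly from Python. A hedged sketch (the output directory is a placeholder; running it downloads the original checkpoint and needs `torchaudio`, plus `datasets` for the speech-commands variant):

    # Placeholder output path; the function handles download, key renaming and logit checks.
    convert_audio_spectrogram_transformer_checkpoint(
        model_name="ast-finetuned-audioset-10-10-0.4593",
        pytorch_dump_folder_path="./ast-converted",
        push_to_hub=False,
    )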
venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/feature_extraction_audio_spectrogram_transformer.py ADDED
@@ -0,0 +1,236 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Feature extractor class for Audio Spectrogram Transformer.
17
+ """
18
+
19
+ from typing import List, Optional, Union
20
+
21
+ import numpy as np
22
+
23
+ from ...audio_utils import mel_filter_bank, spectrogram, window_function
24
+ from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
25
+ from ...feature_extraction_utils import BatchFeature
26
+ from ...utils import TensorType, is_speech_available, is_torch_available, logging
27
+
28
+
29
+ if is_speech_available():
30
+ import torchaudio.compliance.kaldi as ta_kaldi
31
+
32
+ if is_torch_available():
33
+ import torch
34
+
35
+
36
+ logger = logging.get_logger(__name__)
37
+
38
+
39
+ class ASTFeatureExtractor(SequenceFeatureExtractor):
40
+ r"""
41
+ Constructs an Audio Spectrogram Transformer (AST) feature extractor.
42
+
43
+ This feature extractor inherits from [`~feature_extraction_sequence_utils.SequenceFeatureExtractor`] which contains
44
+ most of the main methods. Users should refer to this superclass for more information regarding those methods.
45
+
46
+ This class extracts mel-filter bank features from raw speech using TorchAudio if installed or using numpy
47
+ otherwise, pads/truncates them to a fixed length and normalizes them using a mean and standard deviation.
48
+
49
+ Args:
50
+ feature_size (`int`, *optional*, defaults to 1):
51
+ The feature dimension of the extracted features.
52
+ sampling_rate (`int`, *optional*, defaults to 16000):
53
+ The sampling rate at which the audio files should be digitalized expressed in hertz (Hz).
54
+ num_mel_bins (`int`, *optional*, defaults to 128):
55
+ Number of Mel-frequency bins.
56
+ max_length (`int`, *optional*, defaults to 1024):
57
+ Maximum length to which to pad/truncate the extracted features.
58
+ do_normalize (`bool`, *optional*, defaults to `True`):
59
+ Whether or not to normalize the log-Mel features using `mean` and `std`.
60
+ mean (`float`, *optional*, defaults to -4.2677393):
61
+ The mean value used to normalize the log-Mel features. Uses the AudioSet mean by default.
62
+ std (`float`, *optional*, defaults to 4.5689974):
63
+ The standard deviation value used to normalize the log-Mel features. Uses the AudioSet standard deviation
64
+ by default.
65
+ return_attention_mask (`bool`, *optional*, defaults to `False`):
66
+ Whether or not [`~ASTFeatureExtractor.__call__`] should return `attention_mask`.
67
+ """
68
+
69
+ model_input_names = ["input_values", "attention_mask"]
70
+
71
+ def __init__(
72
+ self,
73
+ feature_size=1,
74
+ sampling_rate=16000,
75
+ num_mel_bins=128,
76
+ max_length=1024,
77
+ padding_value=0.0,
78
+ do_normalize=True,
79
+ mean=-4.2677393,
80
+ std=4.5689974,
81
+ return_attention_mask=False,
82
+ **kwargs,
83
+ ):
84
+ super().__init__(feature_size=feature_size, sampling_rate=sampling_rate, padding_value=padding_value, **kwargs)
85
+ self.num_mel_bins = num_mel_bins
86
+ self.max_length = max_length
87
+ self.do_normalize = do_normalize
88
+ self.mean = mean
89
+ self.std = std
90
+ self.return_attention_mask = return_attention_mask
91
+
92
+ if not is_speech_available():
93
+ mel_filters = mel_filter_bank(
94
+ num_frequency_bins=256,
95
+ num_mel_filters=self.num_mel_bins,
96
+ min_frequency=20,
97
+ max_frequency=sampling_rate // 2,
98
+ sampling_rate=sampling_rate,
99
+ norm=None,
100
+ mel_scale="kaldi",
101
+ triangularize_in_mel_space=True,
102
+ )
103
+
104
+ self.mel_filters = np.pad(mel_filters, ((0, 1), (0, 0)))
105
+ self.window = window_function(400, "hann", periodic=False)
106
+
107
+ def _extract_fbank_features(
108
+ self,
109
+ waveform: np.ndarray,
110
+ max_length: int,
111
+ ) -> np.ndarray:
112
+ """
113
+ Get mel-filter bank features using TorchAudio. Note that TorchAudio requires 16-bit signed integers as inputs
114
+ and hence the waveform should not be normalized before feature extraction.
115
+ """
116
+ # waveform = waveform * (2**15) # Kaldi compliance: 16-bit signed integers
117
+ if is_speech_available():
118
+ waveform = torch.from_numpy(waveform).unsqueeze(0)
119
+ fbank = ta_kaldi.fbank(
120
+ waveform,
121
+ sample_frequency=self.sampling_rate,
122
+ window_type="hanning",
123
+ num_mel_bins=self.num_mel_bins,
124
+ )
125
+ else:
126
+ waveform = np.squeeze(waveform)
127
+ fbank = spectrogram(
128
+ waveform,
129
+ self.window,
130
+ frame_length=400,
131
+ hop_length=160,
132
+ fft_length=512,
133
+ power=2.0,
134
+ center=False,
135
+ preemphasis=0.97,
136
+ mel_filters=self.mel_filters,
137
+ log_mel="log",
138
+ mel_floor=1.192092955078125e-07,
139
+ remove_dc_offset=True,
140
+ ).T
141
+
142
+ fbank = torch.from_numpy(fbank)
143
+
144
+ n_frames = fbank.shape[0]
145
+ difference = max_length - n_frames
146
+
147
+ # pad or truncate, depending on difference
148
+ if difference > 0:
149
+ pad_module = torch.nn.ZeroPad2d((0, 0, 0, difference))
150
+ fbank = pad_module(fbank)
151
+ elif difference < 0:
152
+ fbank = fbank[0:max_length, :]
153
+
154
+ fbank = fbank.numpy()
155
+
156
+ return fbank
157
+
158
+ def normalize(self, input_values: np.ndarray) -> np.ndarray:
159
+ return (input_values - (self.mean)) / (self.std * 2)
160
+
161
+ def __call__(
162
+ self,
163
+ raw_speech: Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]],
164
+ sampling_rate: Optional[int] = None,
165
+ return_tensors: Optional[Union[str, TensorType]] = None,
166
+ **kwargs,
167
+ ) -> BatchFeature:
168
+ """
169
+ Main method to featurize and prepare for the model one or several sequence(s).
170
+
171
+ Args:
172
+ raw_speech (`np.ndarray`, `List[float]`, `List[np.ndarray]`, `List[List[float]]`):
173
+ The sequence or batch of sequences to be padded. Each sequence can be a numpy array, a list of float
174
+ values, a list of numpy arrays or a list of list of float values. Must be mono channel audio, not
175
+ stereo, i.e. single float per timestep.
176
+ sampling_rate (`int`, *optional*):
177
+ The sampling rate at which the `raw_speech` input was sampled. It is strongly recommended to pass
178
+ `sampling_rate` at the forward call to prevent silent errors.
179
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
180
+ If set, will return tensors instead of list of python integers. Acceptable values are:
181
+
182
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
183
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
184
+ - `'np'`: Return Numpy `np.ndarray` objects.
185
+ """
186
+
187
+ if sampling_rate is not None:
188
+ if sampling_rate != self.sampling_rate:
189
+ raise ValueError(
190
+ f"The model corresponding to this feature extractor: {self} was trained using a sampling rate of"
191
+ f" {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled with"
192
+ f" {self.sampling_rate} and not {sampling_rate}."
193
+ )
194
+ else:
195
+ logger.warning(
196
+ "It is strongly recommended to pass the `sampling_rate` argument to this function. "
197
+ "Failing to do so can result in silent errors that might be hard to debug."
198
+ )
199
+
200
+ is_batched_numpy = isinstance(raw_speech, np.ndarray) and len(raw_speech.shape) > 1
201
+ if is_batched_numpy and len(raw_speech.shape) > 2:
202
+ raise ValueError(f"Only mono-channel audio is supported for input to {self}")
203
+ is_batched = is_batched_numpy or (
204
+ isinstance(raw_speech, (list, tuple)) and (isinstance(raw_speech[0], (np.ndarray, tuple, list)))
205
+ )
206
+
207
+ if is_batched:
208
+ raw_speech = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
209
+ elif not is_batched and not isinstance(raw_speech, np.ndarray):
210
+ raw_speech = np.asarray(raw_speech, dtype=np.float32)
211
+ elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
212
+ raw_speech = raw_speech.astype(np.float32)
213
+
214
+ # always return batch
215
+ if not is_batched:
216
+ raw_speech = [raw_speech]
217
+
218
+ # extract fbank features and pad/truncate to max_length
219
+ features = [self._extract_fbank_features(waveform, max_length=self.max_length) for waveform in raw_speech]
220
+
221
+ # convert into BatchFeature
222
+ padded_inputs = BatchFeature({"input_values": features})
223
+
224
+ # make sure list is in array format
225
+ input_values = padded_inputs.get("input_values")
226
+ if isinstance(input_values[0], list):
227
+ padded_inputs["input_values"] = [np.asarray(feature, dtype=np.float32) for feature in input_values]
228
+
229
+ # normalization
230
+ if self.do_normalize:
231
+ padded_inputs["input_values"] = [self.normalize(feature) for feature in input_values]
232
+
233
+ if return_tensors is not None:
234
+ padded_inputs = padded_inputs.convert_to_tensors(return_tensors)
235
+
236
+ return padded_inputs
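
A minimal usage sketch of the feature extractor added above (assuming `transformers`, `numpy` and `torch` are installed; the silent waveform and printed shape are illustrative, based on the default `max_length=1024` and `num_mel_bins=128`):

import numpy as np
from transformers import ASTFeatureExtractor

# Build the extractor with its library defaults (16 kHz sampling rate, 128 mel bins, max_length=1024).
feature_extractor = ASTFeatureExtractor()
# Placeholder mono waveform of one second at 16 kHz; replace with real audio samples.
waveform = np.zeros(16000, dtype=np.float32)
inputs = feature_extractor(waveform, sampling_rate=16000, return_tensors="pt")
print(inputs["input_values"].shape)  # expected: torch.Size([1, 1024, 128]) with the defaults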
venv/lib/python3.10/site-packages/transformers/models/audio_spectrogram_transformer/modeling_audio_spectrogram_transformer.py ADDED
@@ -0,0 +1,613 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 MIT and The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch Audio Spectrogram Transformer (AST) model."""
16
+
17
+ import math
18
+ from typing import Dict, List, Optional, Set, Tuple, Union
19
+
20
+ import torch
21
+ import torch.utils.checkpoint
22
+ from torch import nn
23
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
24
+
25
+ from ...activations import ACT2FN
26
+ from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, SequenceClassifierOutput
27
+ from ...modeling_utils import PreTrainedModel
28
+ from ...pytorch_utils import find_pruneable_heads_and_indices, prune_linear_layer
29
+ from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
30
+ from .configuration_audio_spectrogram_transformer import ASTConfig
31
+
32
+
33
+ logger = logging.get_logger(__name__)
34
+
35
+ # General docstring
36
+ _CONFIG_FOR_DOC = "ASTConfig"
37
+
38
+ # Base docstring
39
+ _CHECKPOINT_FOR_DOC = "MIT/ast-finetuned-audioset-10-10-0.4593"
40
+ _EXPECTED_OUTPUT_SHAPE = [1, 1214, 768]
41
+
42
+ # Audio classification docstring
43
+ _SEQ_CLASS_CHECKPOINT = "MIT/ast-finetuned-audioset-10-10-0.4593"
44
+ _SEQ_CLASS_EXPECTED_OUTPUT = "'Speech'"
45
+ _SEQ_CLASS_EXPECTED_LOSS = 0.17
46
+
47
+
48
+ from ..deprecated._archive_maps import AUDIO_SPECTROGRAM_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
49
+
50
+
51
+ class ASTEmbeddings(nn.Module):
52
+ """
53
+ Construct the CLS token, position and patch embeddings.
54
+ """
55
+
56
+ def __init__(self, config: ASTConfig) -> None:
57
+ super().__init__()
58
+
59
+ self.cls_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
60
+ self.distillation_token = nn.Parameter(torch.zeros(1, 1, config.hidden_size))
61
+ self.patch_embeddings = ASTPatchEmbeddings(config)
62
+
63
+ frequency_out_dimension, time_out_dimension = self.get_shape(config)
64
+ num_patches = frequency_out_dimension * time_out_dimension
65
+ self.position_embeddings = nn.Parameter(torch.zeros(1, num_patches + 2, config.hidden_size))
66
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
67
+ self.config = config
68
+
69
+ def get_shape(self, config):
70
+ # see Karpathy's cs231n blog on how to calculate the output dimensions
71
+ # https://cs231n.github.io/convolutional-networks/#conv
72
+ frequency_out_dimension = (config.num_mel_bins - config.patch_size) // config.frequency_stride + 1
73
+ time_out_dimension = (config.max_length - config.patch_size) // config.time_stride + 1
74
+
75
+ return frequency_out_dimension, time_out_dimension
76
+
77
+ def forward(self, input_values: torch.Tensor) -> torch.Tensor:
78
+ batch_size = input_values.shape[0]
79
+ embeddings = self.patch_embeddings(input_values)
80
+
81
+ cls_tokens = self.cls_token.expand(batch_size, -1, -1)
82
+ distillation_tokens = self.distillation_token.expand(batch_size, -1, -1)
83
+ embeddings = torch.cat((cls_tokens, distillation_tokens, embeddings), dim=1)
84
+ embeddings = embeddings + self.position_embeddings
85
+ embeddings = self.dropout(embeddings)
86
+
87
+ return embeddings
88
+
89
+
90
+ class ASTPatchEmbeddings(nn.Module):
91
+ """
92
+ This class turns `input_values` into the initial `hidden_states` (patch embeddings) of shape `(batch_size,
93
+ seq_length, hidden_size)` to be consumed by a Transformer.
94
+ """
95
+
96
+ def __init__(self, config):
97
+ super().__init__()
98
+
99
+ patch_size = config.patch_size
100
+ frequency_stride = config.frequency_stride
101
+ time_stride = config.time_stride
102
+
103
+ self.projection = nn.Conv2d(
104
+ 1, config.hidden_size, kernel_size=(patch_size, patch_size), stride=(frequency_stride, time_stride)
105
+ )
106
+
107
+ def forward(self, input_values: torch.Tensor) -> torch.Tensor:
108
+ input_values = input_values.unsqueeze(1)
109
+ input_values = input_values.transpose(2, 3)
110
+ embeddings = self.projection(input_values).flatten(2).transpose(1, 2)
111
+ return embeddings
112
+
113
+
114
+ # Copied from transformers.models.vit.modeling_vit.ViTSelfAttention with ViT->AST
115
+ class ASTSelfAttention(nn.Module):
116
+ def __init__(self, config: ASTConfig) -> None:
117
+ super().__init__()
118
+ if config.hidden_size % config.num_attention_heads != 0 and not hasattr(config, "embedding_size"):
119
+ raise ValueError(
120
+ f"The hidden size {config.hidden_size} is not a multiple of the number of attention "
121
+ f"heads {config.num_attention_heads}."
122
+ )
123
+
124
+ self.num_attention_heads = config.num_attention_heads
125
+ self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
126
+ self.all_head_size = self.num_attention_heads * self.attention_head_size
127
+
128
+ self.query = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
129
+ self.key = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
130
+ self.value = nn.Linear(config.hidden_size, self.all_head_size, bias=config.qkv_bias)
131
+
132
+ self.dropout = nn.Dropout(config.attention_probs_dropout_prob)
133
+
134
+ def transpose_for_scores(self, x: torch.Tensor) -> torch.Tensor:
135
+ new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
136
+ x = x.view(new_x_shape)
137
+ return x.permute(0, 2, 1, 3)
138
+
139
+ def forward(
140
+ self, hidden_states, head_mask: Optional[torch.Tensor] = None, output_attentions: bool = False
141
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
142
+ mixed_query_layer = self.query(hidden_states)
143
+
144
+ key_layer = self.transpose_for_scores(self.key(hidden_states))
145
+ value_layer = self.transpose_for_scores(self.value(hidden_states))
146
+ query_layer = self.transpose_for_scores(mixed_query_layer)
147
+
148
+ # Take the dot product between "query" and "key" to get the raw attention scores.
149
+ attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
150
+
151
+ attention_scores = attention_scores / math.sqrt(self.attention_head_size)
152
+
153
+ # Normalize the attention scores to probabilities.
154
+ attention_probs = nn.functional.softmax(attention_scores, dim=-1)
155
+
156
+ # This is actually dropping out entire tokens to attend to, which might
157
+ # seem a bit unusual, but is taken from the original Transformer paper.
158
+ attention_probs = self.dropout(attention_probs)
159
+
160
+ # Mask heads if we want to
161
+ if head_mask is not None:
162
+ attention_probs = attention_probs * head_mask
163
+
164
+ context_layer = torch.matmul(attention_probs, value_layer)
165
+
166
+ context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
167
+ new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
168
+ context_layer = context_layer.view(new_context_layer_shape)
169
+
170
+ outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
171
+
172
+ return outputs
173
+
174
+
175
+ # Copied from transformers.models.vit.modeling_vit.ViTSelfOutput with ViT->AST
176
+ class ASTSelfOutput(nn.Module):
177
+ """
178
+ The residual connection is defined in ASTLayer instead of here (as is the case with other models), due to the
179
+ layernorm applied before each block.
180
+ """
181
+
182
+ def __init__(self, config: ASTConfig) -> None:
183
+ super().__init__()
184
+ self.dense = nn.Linear(config.hidden_size, config.hidden_size)
185
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
186
+
187
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
188
+ hidden_states = self.dense(hidden_states)
189
+ hidden_states = self.dropout(hidden_states)
190
+
191
+ return hidden_states
192
+
193
+
194
+ # Copied from transformers.models.vit.modeling_vit.ViTAttention with ViT->AST
195
+ class ASTAttention(nn.Module):
196
+ def __init__(self, config: ASTConfig) -> None:
197
+ super().__init__()
198
+ self.attention = ASTSelfAttention(config)
199
+ self.output = ASTSelfOutput(config)
200
+ self.pruned_heads = set()
201
+
202
+ def prune_heads(self, heads: Set[int]) -> None:
203
+ if len(heads) == 0:
204
+ return
205
+ heads, index = find_pruneable_heads_and_indices(
206
+ heads, self.attention.num_attention_heads, self.attention.attention_head_size, self.pruned_heads
207
+ )
208
+
209
+ # Prune linear layers
210
+ self.attention.query = prune_linear_layer(self.attention.query, index)
211
+ self.attention.key = prune_linear_layer(self.attention.key, index)
212
+ self.attention.value = prune_linear_layer(self.attention.value, index)
213
+ self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)
214
+
215
+ # Update hyper params and store pruned heads
216
+ self.attention.num_attention_heads = self.attention.num_attention_heads - len(heads)
217
+ self.attention.all_head_size = self.attention.attention_head_size * self.attention.num_attention_heads
218
+ self.pruned_heads = self.pruned_heads.union(heads)
219
+
220
+ def forward(
221
+ self,
222
+ hidden_states: torch.Tensor,
223
+ head_mask: Optional[torch.Tensor] = None,
224
+ output_attentions: bool = False,
225
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
226
+ self_outputs = self.attention(hidden_states, head_mask, output_attentions)
227
+
228
+ attention_output = self.output(self_outputs[0], hidden_states)
229
+
230
+ outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
231
+ return outputs
232
+
233
+
234
+ # Copied from transformers.models.vit.modeling_vit.ViTIntermediate with ViT->AST
235
+ class ASTIntermediate(nn.Module):
236
+ def __init__(self, config: ASTConfig) -> None:
237
+ super().__init__()
238
+ self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
239
+ if isinstance(config.hidden_act, str):
240
+ self.intermediate_act_fn = ACT2FN[config.hidden_act]
241
+ else:
242
+ self.intermediate_act_fn = config.hidden_act
243
+
244
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
245
+ hidden_states = self.dense(hidden_states)
246
+ hidden_states = self.intermediate_act_fn(hidden_states)
247
+
248
+ return hidden_states
249
+
250
+
251
+ # Copied from transformers.models.vit.modeling_vit.ViTOutput with ViT->AST
252
+ class ASTOutput(nn.Module):
253
+ def __init__(self, config: ASTConfig) -> None:
254
+ super().__init__()
255
+ self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
256
+ self.dropout = nn.Dropout(config.hidden_dropout_prob)
257
+
258
+ def forward(self, hidden_states: torch.Tensor, input_tensor: torch.Tensor) -> torch.Tensor:
259
+ hidden_states = self.dense(hidden_states)
260
+ hidden_states = self.dropout(hidden_states)
261
+
262
+ hidden_states = hidden_states + input_tensor
263
+
264
+ return hidden_states
265
+
266
+
267
+ # Copied from transformers.models.vit.modeling_vit.ViTLayer with ViT->AST
268
+ class ASTLayer(nn.Module):
269
+ """This corresponds to the Block class in the timm implementation."""
270
+
271
+ def __init__(self, config: ASTConfig) -> None:
272
+ super().__init__()
273
+ self.chunk_size_feed_forward = config.chunk_size_feed_forward
274
+ self.seq_len_dim = 1
275
+ self.attention = ASTAttention(config)
276
+ self.intermediate = ASTIntermediate(config)
277
+ self.output = ASTOutput(config)
278
+ self.layernorm_before = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
279
+ self.layernorm_after = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
280
+
281
+ def forward(
282
+ self,
283
+ hidden_states: torch.Tensor,
284
+ head_mask: Optional[torch.Tensor] = None,
285
+ output_attentions: bool = False,
286
+ ) -> Union[Tuple[torch.Tensor, torch.Tensor], Tuple[torch.Tensor]]:
287
+ self_attention_outputs = self.attention(
288
+ self.layernorm_before(hidden_states), # in AST, layernorm is applied before self-attention
289
+ head_mask,
290
+ output_attentions=output_attentions,
291
+ )
292
+ attention_output = self_attention_outputs[0]
293
+ outputs = self_attention_outputs[1:] # add self attentions if we output attention weights
294
+
295
+ # first residual connection
296
+ hidden_states = attention_output + hidden_states
297
+
298
+ # in AST, layernorm is also applied after self-attention
299
+ layer_output = self.layernorm_after(hidden_states)
300
+ layer_output = self.intermediate(layer_output)
301
+
302
+ # second residual connection is done here
303
+ layer_output = self.output(layer_output, hidden_states)
304
+
305
+ outputs = (layer_output,) + outputs
306
+
307
+ return outputs
308
+
309
+
310
+ # Copied from transformers.models.vit.modeling_vit.ViTEncoder with ViT->AST
311
+ class ASTEncoder(nn.Module):
312
+ def __init__(self, config: ASTConfig) -> None:
313
+ super().__init__()
314
+ self.config = config
315
+ self.layer = nn.ModuleList([ASTLayer(config) for _ in range(config.num_hidden_layers)])
316
+ self.gradient_checkpointing = False
317
+
318
+ def forward(
319
+ self,
320
+ hidden_states: torch.Tensor,
321
+ head_mask: Optional[torch.Tensor] = None,
322
+ output_attentions: bool = False,
323
+ output_hidden_states: bool = False,
324
+ return_dict: bool = True,
325
+ ) -> Union[tuple, BaseModelOutput]:
326
+ all_hidden_states = () if output_hidden_states else None
327
+ all_self_attentions = () if output_attentions else None
328
+
329
+ for i, layer_module in enumerate(self.layer):
330
+ if output_hidden_states:
331
+ all_hidden_states = all_hidden_states + (hidden_states,)
332
+
333
+ layer_head_mask = head_mask[i] if head_mask is not None else None
334
+
335
+ if self.gradient_checkpointing and self.training:
336
+ layer_outputs = self._gradient_checkpointing_func(
337
+ layer_module.__call__,
338
+ hidden_states,
339
+ layer_head_mask,
340
+ output_attentions,
341
+ )
342
+ else:
343
+ layer_outputs = layer_module(hidden_states, layer_head_mask, output_attentions)
344
+
345
+ hidden_states = layer_outputs[0]
346
+
347
+ if output_attentions:
348
+ all_self_attentions = all_self_attentions + (layer_outputs[1],)
349
+
350
+ if output_hidden_states:
351
+ all_hidden_states = all_hidden_states + (hidden_states,)
352
+
353
+ if not return_dict:
354
+ return tuple(v for v in [hidden_states, all_hidden_states, all_self_attentions] if v is not None)
355
+ return BaseModelOutput(
356
+ last_hidden_state=hidden_states,
357
+ hidden_states=all_hidden_states,
358
+ attentions=all_self_attentions,
359
+ )
360
+
361
+
362
+ class ASTPreTrainedModel(PreTrainedModel):
363
+ """
364
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
365
+ models.
366
+ """
367
+
368
+ config_class = ASTConfig
369
+ base_model_prefix = "audio_spectrogram_transformer"
370
+ main_input_name = "input_values"
371
+ supports_gradient_checkpointing = True
372
+
373
+ # Copied from transformers.models.deit.modeling_deit.DeiTPreTrainedModel._init_weights
374
+ def _init_weights(self, module: Union[nn.Linear, nn.Conv2d, nn.LayerNorm]) -> None:
375
+ """Initialize the weights"""
376
+ if isinstance(module, (nn.Linear, nn.Conv2d)):
377
+ # Upcast the input in `fp32` and cast it back to desired `dtype` to avoid
378
+ # `trunc_normal_cpu` not implemented in `half` issues
379
+ module.weight.data = nn.init.trunc_normal_(
380
+ module.weight.data.to(torch.float32), mean=0.0, std=self.config.initializer_range
381
+ ).to(module.weight.dtype)
382
+ if module.bias is not None:
383
+ module.bias.data.zero_()
384
+ elif isinstance(module, nn.LayerNorm):
385
+ module.bias.data.zero_()
386
+ module.weight.data.fill_(1.0)
387
+
388
+
389
+ AUDIO_SPECTROGRAM_TRANSFORMER_START_DOCSTRING = r"""
390
+ This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it
391
+ as a regular PyTorch Module and refer to the PyTorch documentation for all matters related to general usage and
392
+ behavior.
393
+
394
+ Parameters:
395
+ config ([`ASTConfig`]):
396
+ Model configuration class with all the parameters of the model. Initializing with a config file does not
397
+ load the weights associated with the model, only the configuration. Check out the
398
+ [`~PreTrainedModel.from_pretrained`] method to load the model weights.
399
+ """
400
+
401
+ AUDIO_SPECTROGRAM_TRANSFORMER_INPUTS_DOCSTRING = r"""
402
+ Args:
403
+ input_values (`torch.FloatTensor` of shape `(batch_size, max_length, num_mel_bins)`):
404
+ Float values mel features extracted from the raw audio waveform. Raw audio waveform can be obtained by
405
+ loading a `.flac` or `.wav` audio file into an array of type `List[float]` or a `numpy.ndarray`, *e.g.* via
406
+ the soundfile library (`pip install soundfile`). To prepare the array into `input_values`, the
407
+ [`AutoFeatureExtractor`] should be used for extracting the mel features, padding and conversion into a
408
+ tensor of type `torch.FloatTensor`. See [`~ASTFeatureExtractor.__call__`]
409
+
410
+ head_mask (`torch.FloatTensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
411
+ Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
412
+
413
+ - 1 indicates the head is **not masked**,
414
+ - 0 indicates the head is **masked**.
415
+
416
+ output_attentions (`bool`, *optional*):
417
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
418
+ tensors for more detail.
419
+ output_hidden_states (`bool`, *optional*):
420
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
421
+ more detail.
422
+ return_dict (`bool`, *optional*):
423
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
424
+ """
425
+
426
+
427
+ @add_start_docstrings(
428
+ "The bare AST Model transformer outputting raw hidden-states without any specific head on top.",
429
+ AUDIO_SPECTROGRAM_TRANSFORMER_START_DOCSTRING,
430
+ )
431
+ class ASTModel(ASTPreTrainedModel):
432
+ def __init__(self, config: ASTConfig) -> None:
433
+ super().__init__(config)
434
+ self.config = config
435
+
436
+ self.embeddings = ASTEmbeddings(config)
437
+ self.encoder = ASTEncoder(config)
438
+
439
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
440
+
441
+ # Initialize weights and apply final processing
442
+ self.post_init()
443
+
444
+ def get_input_embeddings(self) -> ASTPatchEmbeddings:
445
+ return self.embeddings.patch_embeddings
446
+
447
+ def _prune_heads(self, heads_to_prune: Dict[int, List[int]]) -> None:
448
+ """
449
+ Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
450
+ class PreTrainedModel
451
+ """
452
+ for layer, heads in heads_to_prune.items():
453
+ self.encoder.layer[layer].attention.prune_heads(heads)
454
+
455
+ @add_start_docstrings_to_model_forward(AUDIO_SPECTROGRAM_TRANSFORMER_INPUTS_DOCSTRING)
456
+ @add_code_sample_docstrings(
457
+ checkpoint=_CHECKPOINT_FOR_DOC,
458
+ output_type=BaseModelOutputWithPooling,
459
+ config_class=_CONFIG_FOR_DOC,
460
+ modality="audio",
461
+ expected_output=_EXPECTED_OUTPUT_SHAPE,
462
+ )
463
+ def forward(
464
+ self,
465
+ input_values: Optional[torch.Tensor] = None,
466
+ head_mask: Optional[torch.Tensor] = None,
467
+ output_attentions: Optional[bool] = None,
468
+ output_hidden_states: Optional[bool] = None,
469
+ return_dict: Optional[bool] = None,
470
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
471
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
472
+ output_hidden_states = (
473
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
474
+ )
475
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
476
+
477
+ if input_values is None:
478
+ raise ValueError("You have to specify input_values")
479
+
480
+ # Prepare head mask if needed
481
+ # 1.0 in head_mask indicate we keep the head
482
+ # attention_probs has shape bsz x n_heads x N x N
483
+ # input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
484
+ # and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
485
+ head_mask = self.get_head_mask(head_mask, self.config.num_hidden_layers)
486
+
487
+ embedding_output = self.embeddings(input_values)
488
+
489
+ encoder_outputs = self.encoder(
490
+ embedding_output,
491
+ head_mask=head_mask,
492
+ output_attentions=output_attentions,
493
+ output_hidden_states=output_hidden_states,
494
+ return_dict=return_dict,
495
+ )
496
+ sequence_output = encoder_outputs[0]
497
+ sequence_output = self.layernorm(sequence_output)
498
+
499
+ pooled_output = (sequence_output[:, 0] + sequence_output[:, 1]) / 2
500
+
501
+ if not return_dict:
502
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
503
+
504
+ return BaseModelOutputWithPooling(
505
+ last_hidden_state=sequence_output,
506
+ pooler_output=pooled_output,
507
+ hidden_states=encoder_outputs.hidden_states,
508
+ attentions=encoder_outputs.attentions,
509
+ )
510
+
511
+
512
+ class ASTMLPHead(nn.Module):
513
+ def __init__(self, config: ASTConfig):
514
+ super().__init__()
515
+ self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
516
+ self.dense = nn.Linear(config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
517
+
518
+ def forward(self, hidden_state):
519
+ hidden_state = self.layernorm(hidden_state)
520
+ hidden_state = self.dense(hidden_state)
521
+ return hidden_state
522
+
523
+
524
+ @add_start_docstrings(
525
+ """
526
+ Audio Spectrogram Transformer model with an audio classification head on top (a linear layer on top of the pooled
527
+ output) e.g. for datasets like AudioSet, Speech Commands v2.
528
+ """,
529
+ AUDIO_SPECTROGRAM_TRANSFORMER_START_DOCSTRING,
530
+ )
531
+ class ASTForAudioClassification(ASTPreTrainedModel):
532
+ def __init__(self, config: ASTConfig) -> None:
533
+ super().__init__(config)
534
+
535
+ self.num_labels = config.num_labels
536
+ self.audio_spectrogram_transformer = ASTModel(config)
537
+
538
+ # Classifier head
539
+ self.classifier = ASTMLPHead(config)
540
+
541
+ # Initialize weights and apply final processing
542
+ self.post_init()
543
+
544
+ @add_start_docstrings_to_model_forward(AUDIO_SPECTROGRAM_TRANSFORMER_INPUTS_DOCSTRING)
545
+ @add_code_sample_docstrings(
546
+ checkpoint=_SEQ_CLASS_CHECKPOINT,
547
+ output_type=SequenceClassifierOutput,
548
+ config_class=_CONFIG_FOR_DOC,
549
+ modality="audio",
550
+ expected_output=_SEQ_CLASS_EXPECTED_OUTPUT,
551
+ expected_loss=_SEQ_CLASS_EXPECTED_LOSS,
552
+ )
553
+ def forward(
554
+ self,
555
+ input_values: Optional[torch.Tensor] = None,
556
+ head_mask: Optional[torch.Tensor] = None,
557
+ labels: Optional[torch.Tensor] = None,
558
+ output_attentions: Optional[bool] = None,
559
+ output_hidden_states: Optional[bool] = None,
560
+ return_dict: Optional[bool] = None,
561
+ ) -> Union[tuple, SequenceClassifierOutput]:
562
+ r"""
563
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
564
+ Labels for computing the audio classification/regression loss. Indices should be in `[0, ...,
565
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), if
566
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
567
+ """
568
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
569
+
570
+ outputs = self.audio_spectrogram_transformer(
571
+ input_values,
572
+ head_mask=head_mask,
573
+ output_attentions=output_attentions,
574
+ output_hidden_states=output_hidden_states,
575
+ return_dict=return_dict,
576
+ )
577
+
578
+ pooled_output = outputs[1]
579
+ logits = self.classifier(pooled_output)
580
+
581
+ loss = None
582
+ if labels is not None:
583
+ if self.config.problem_type is None:
584
+ if self.num_labels == 1:
585
+ self.config.problem_type = "regression"
586
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
587
+ self.config.problem_type = "single_label_classification"
588
+ else:
589
+ self.config.problem_type = "multi_label_classification"
590
+
591
+ if self.config.problem_type == "regression":
592
+ loss_fct = MSELoss()
593
+ if self.num_labels == 1:
594
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
595
+ else:
596
+ loss = loss_fct(logits, labels)
597
+ elif self.config.problem_type == "single_label_classification":
598
+ loss_fct = CrossEntropyLoss()
599
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
600
+ elif self.config.problem_type == "multi_label_classification":
601
+ loss_fct = BCEWithLogitsLoss()
602
+ loss = loss_fct(logits, labels)
603
+
604
+ if not return_dict:
605
+ output = (logits,) + outputs[2:]
606
+ return ((loss,) + output) if loss is not None else output
607
+
608
+ return SequenceClassifierOutput(
609
+ loss=loss,
610
+ logits=logits,
611
+ hidden_states=outputs.hidden_states,
612
+ attentions=outputs.attentions,
613
+ )
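
A small forward-pass sketch for the model file added above, using a deliberately tiny random `ASTConfig` rather than the pretrained checkpoint (the hyperparameter values are illustrative; `torch` is assumed to be installed):

import torch
from transformers import ASTConfig, ASTForAudioClassification

# Tiny configuration so the example runs quickly; real checkpoints use hidden_size=768, 12 layers, etc.
config = ASTConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=2, intermediate_size=64, num_labels=5)
model = ASTForAudioClassification(config)

# input_values has shape (batch_size, max_length, num_mel_bins), matching the inputs docstring above.
dummy_input = torch.randn(1, config.max_length, config.num_mel_bins)
with torch.no_grad():
    logits = model(input_values=dummy_input).logits
print(logits.shape)  # torch.Size([1, 5])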
venv/lib/python3.10/site-packages/transformers/models/clip/__init__.py ADDED
@@ -0,0 +1,183 @@
1
+ # Copyright 2021 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ from typing import TYPE_CHECKING
15
+
16
+ from ...utils import (
17
+ OptionalDependencyNotAvailable,
18
+ _LazyModule,
19
+ is_flax_available,
20
+ is_tf_available,
21
+ is_tokenizers_available,
22
+ is_torch_available,
23
+ is_vision_available,
24
+ )
25
+
26
+
27
+ _import_structure = {
28
+ "configuration_clip": [
29
+ "CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP",
30
+ "CLIPConfig",
31
+ "CLIPOnnxConfig",
32
+ "CLIPTextConfig",
33
+ "CLIPVisionConfig",
34
+ ],
35
+ "processing_clip": ["CLIPProcessor"],
36
+ "tokenization_clip": ["CLIPTokenizer"],
37
+ }
38
+
39
+ try:
40
+ if not is_tokenizers_available():
41
+ raise OptionalDependencyNotAvailable()
42
+ except OptionalDependencyNotAvailable:
43
+ pass
44
+ else:
45
+ _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
46
+
47
+ try:
48
+ if not is_vision_available():
49
+ raise OptionalDependencyNotAvailable()
50
+ except OptionalDependencyNotAvailable:
51
+ pass
52
+ else:
53
+ _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
54
+ _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
55
+
56
+ try:
57
+ if not is_torch_available():
58
+ raise OptionalDependencyNotAvailable()
59
+ except OptionalDependencyNotAvailable:
60
+ pass
61
+ else:
62
+ _import_structure["modeling_clip"] = [
63
+ "CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
64
+ "CLIPModel",
65
+ "CLIPPreTrainedModel",
66
+ "CLIPTextModel",
67
+ "CLIPTextModelWithProjection",
68
+ "CLIPVisionModel",
69
+ "CLIPVisionModelWithProjection",
70
+ "CLIPForImageClassification",
71
+ ]
72
+
73
+ try:
74
+ if not is_tf_available():
75
+ raise OptionalDependencyNotAvailable()
76
+ except OptionalDependencyNotAvailable:
77
+ pass
78
+ else:
79
+ _import_structure["modeling_tf_clip"] = [
80
+ "TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST",
81
+ "TFCLIPModel",
82
+ "TFCLIPPreTrainedModel",
83
+ "TFCLIPTextModel",
84
+ "TFCLIPVisionModel",
85
+ ]
86
+
87
+ try:
88
+ if not is_flax_available():
89
+ raise OptionalDependencyNotAvailable()
90
+ except OptionalDependencyNotAvailable:
91
+ pass
92
+ else:
93
+ _import_structure["modeling_flax_clip"] = [
94
+ "FlaxCLIPModel",
95
+ "FlaxCLIPPreTrainedModel",
96
+ "FlaxCLIPTextModel",
97
+ "FlaxCLIPTextPreTrainedModel",
98
+ "FlaxCLIPTextModelWithProjection",
99
+ "FlaxCLIPVisionModel",
100
+ "FlaxCLIPVisionPreTrainedModel",
101
+ ]
102
+
103
+
104
+ if TYPE_CHECKING:
105
+ from .configuration_clip import (
106
+ CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
107
+ CLIPConfig,
108
+ CLIPOnnxConfig,
109
+ CLIPTextConfig,
110
+ CLIPVisionConfig,
111
+ )
112
+ from .processing_clip import CLIPProcessor
113
+ from .tokenization_clip import CLIPTokenizer
114
+
115
+ try:
116
+ if not is_tokenizers_available():
117
+ raise OptionalDependencyNotAvailable()
118
+ except OptionalDependencyNotAvailable:
119
+ pass
120
+ else:
121
+ from .tokenization_clip_fast import CLIPTokenizerFast
122
+
123
+ try:
124
+ if not is_vision_available():
125
+ raise OptionalDependencyNotAvailable()
126
+ except OptionalDependencyNotAvailable:
127
+ pass
128
+ else:
129
+ from .feature_extraction_clip import CLIPFeatureExtractor
130
+ from .image_processing_clip import CLIPImageProcessor
131
+
132
+ try:
133
+ if not is_torch_available():
134
+ raise OptionalDependencyNotAvailable()
135
+ except OptionalDependencyNotAvailable:
136
+ pass
137
+ else:
138
+ from .modeling_clip import (
139
+ CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
140
+ CLIPForImageClassification,
141
+ CLIPModel,
142
+ CLIPPreTrainedModel,
143
+ CLIPTextModel,
144
+ CLIPTextModelWithProjection,
145
+ CLIPVisionModel,
146
+ CLIPVisionModelWithProjection,
147
+ )
148
+
149
+ try:
150
+ if not is_tf_available():
151
+ raise OptionalDependencyNotAvailable()
152
+ except OptionalDependencyNotAvailable:
153
+ pass
154
+ else:
155
+ from .modeling_tf_clip import (
156
+ TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
157
+ TFCLIPModel,
158
+ TFCLIPPreTrainedModel,
159
+ TFCLIPTextModel,
160
+ TFCLIPVisionModel,
161
+ )
162
+
163
+ try:
164
+ if not is_flax_available():
165
+ raise OptionalDependencyNotAvailable()
166
+ except OptionalDependencyNotAvailable:
167
+ pass
168
+ else:
169
+ from .modeling_flax_clip import (
170
+ FlaxCLIPModel,
171
+ FlaxCLIPPreTrainedModel,
172
+ FlaxCLIPTextModel,
173
+ FlaxCLIPTextModelWithProjection,
174
+ FlaxCLIPTextPreTrainedModel,
175
+ FlaxCLIPVisionModel,
176
+ FlaxCLIPVisionPreTrainedModel,
177
+ )
178
+
179
+
180
+ else:
181
+ import sys
182
+
183
+ sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
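
Because of the `_LazyModule` registration above, the public CLIP symbols resolve on first attribute access rather than at package import time. A minimal sketch (assuming `torch` is installed so the PyTorch branch is available; the model below is randomly initialized):

from transformers import CLIPConfig, CLIPModel  # resolved lazily through _LazyModule

# Default configuration mirrors openai/clip-vit-base-patch32.
config = CLIPConfig()
model = CLIPModel(config)
print(type(model).__name__)  # CLIPModel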
venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (2.6 kB).
 
venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/configuration_clip.cpython-310.pyc ADDED
Binary file (16.3 kB).
 
venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/convert_clip_original_pytorch_to_hf.cpython-310.pyc ADDED
Binary file (3.99 kB).
 
venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/feature_extraction_clip.cpython-310.pyc ADDED
Binary file (997 Bytes).
 
venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/image_processing_clip.cpython-310.pyc ADDED
Binary file (13.3 kB).
 
venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_clip.cpython-310.pyc ADDED
Binary file (44 kB).
 
venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_flax_clip.cpython-310.pyc ADDED
Binary file (38.5 kB).
 
venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/modeling_tf_clip.cpython-310.pyc ADDED
Binary file (44.4 kB).
 
venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/processing_clip.cpython-310.pyc ADDED
Binary file (6.5 kB).
 
venv/lib/python3.10/site-packages/transformers/models/clip/__pycache__/tokenization_clip_fast.cpython-310.pyc ADDED
Binary file (5.91 kB).
 
venv/lib/python3.10/site-packages/transformers/models/clip/configuration_clip.py ADDED
@@ -0,0 +1,456 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ CLIP model configuration"""
16
+
17
+ import os
18
+ from collections import OrderedDict
19
+ from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
20
+
21
+
22
+ if TYPE_CHECKING:
23
+ from ...processing_utils import ProcessorMixin
24
+ from ...utils import TensorType
25
+
26
+ from ...configuration_utils import PretrainedConfig
27
+ from ...onnx import OnnxConfig
28
+ from ...utils import logging
29
+
30
+
31
+ logger = logging.get_logger(__name__)
32
+
33
+
34
+ from ..deprecated._archive_maps import CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP # noqa: F401, E402
35
+
36
+
37
+ class CLIPTextConfig(PretrainedConfig):
38
+ r"""
39
+ This is the configuration class to store the configuration of a [`CLIPTextModel`]. It is used to instantiate a CLIP
40
+ text encoder according to the specified arguments, defining the model architecture. Instantiating a configuration
41
+ with the defaults will yield a similar configuration to that of the text encoder of the CLIP
42
+ [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.
43
+
44
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
45
+ documentation from [`PretrainedConfig`] for more information.
46
+
47
+ Args:
48
+ vocab_size (`int`, *optional*, defaults to 49408):
49
+ Vocabulary size of the CLIP text model. Defines the number of different tokens that can be represented by
50
+ the `inputs_ids` passed when calling [`CLIPModel`].
51
+ hidden_size (`int`, *optional*, defaults to 512):
52
+ Dimensionality of the encoder layers and the pooler layer.
53
+ intermediate_size (`int`, *optional*, defaults to 2048):
54
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
55
+ projection_dim (`int`, *optional*, defaults to 512):
56
+ Dimentionality of text and vision projection layers.
57
+ num_hidden_layers (`int`, *optional*, defaults to 12):
58
+ Number of hidden layers in the Transformer encoder.
59
+ num_attention_heads (`int`, *optional*, defaults to 8):
60
+ Number of attention heads for each attention layer in the Transformer encoder.
61
+ max_position_embeddings (`int`, *optional*, defaults to 77):
62
+ The maximum sequence length that this model might ever be used with. Typically set this to something large
63
+ just in case (e.g., 512 or 1024 or 2048).
64
+ hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
65
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
66
+ `"relu"`, `"selu"` and `"gelu_new"` `"quick_gelu"` are supported.
67
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
68
+ The epsilon used by the layer normalization layers.
69
+ attention_dropout (`float`, *optional*, defaults to 0.0):
70
+ The dropout ratio for the attention probabilities.
71
+ initializer_range (`float`, *optional*, defaults to 0.02):
72
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
73
+ initializer_factor (`float`, *optional*, defaults to 1.0):
74
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
75
+ testing).
76
+ pad_token_id (`int`, *optional*, defaults to 1):
77
+ Padding token id.
78
+ bos_token_id (`int`, *optional*, defaults to 49406):
79
+ Beginning of stream token id.
80
+ eos_token_id (`int`, *optional*, defaults to 49407):
81
+ End of stream token id.
82
+
83
+ Example:
84
+
85
+ ```python
86
+ >>> from transformers import CLIPTextConfig, CLIPTextModel
87
+
88
+ >>> # Initializing a CLIPTextConfig with openai/clip-vit-base-patch32 style configuration
89
+ >>> configuration = CLIPTextConfig()
90
+
91
+ >>> # Initializing a CLIPTextModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
92
+ >>> model = CLIPTextModel(configuration)
93
+
94
+ >>> # Accessing the model configuration
95
+ >>> configuration = model.config
96
+ ```"""
97
+
98
+ model_type = "clip_text_model"
99
+
100
+ def __init__(
101
+ self,
102
+ vocab_size=49408,
103
+ hidden_size=512,
104
+ intermediate_size=2048,
105
+ projection_dim=512,
106
+ num_hidden_layers=12,
107
+ num_attention_heads=8,
108
+ max_position_embeddings=77,
109
+ hidden_act="quick_gelu",
110
+ layer_norm_eps=1e-5,
111
+ attention_dropout=0.0,
112
+ initializer_range=0.02,
113
+ initializer_factor=1.0,
114
+ # This differs from `CLIPTokenizer`'s default and from openai/clip
115
+ # See https://github.com/huggingface/transformers/pull/24773#issuecomment-1632287538
116
+ pad_token_id=1,
117
+ bos_token_id=49406,
118
+ eos_token_id=49407,
119
+ **kwargs,
120
+ ):
121
+ super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
122
+
123
+ self.vocab_size = vocab_size
124
+ self.hidden_size = hidden_size
125
+ self.intermediate_size = intermediate_size
126
+ self.projection_dim = projection_dim
127
+ self.num_hidden_layers = num_hidden_layers
128
+ self.num_attention_heads = num_attention_heads
129
+ self.max_position_embeddings = max_position_embeddings
130
+ self.layer_norm_eps = layer_norm_eps
131
+ self.hidden_act = hidden_act
132
+ self.initializer_range = initializer_range
133
+ self.initializer_factor = initializer_factor
134
+ self.attention_dropout = attention_dropout
135
+
136
+ @classmethod
137
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
138
+ cls._set_token_in_kwargs(kwargs)
139
+
140
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
141
+
142
+ # get the text config dict if we are loading from CLIPConfig
143
+ if config_dict.get("model_type") == "clip":
144
+ config_dict = config_dict["text_config"]
145
+
146
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
147
+ logger.warning(
148
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
149
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
150
+ )
151
+
152
+ return cls.from_dict(config_dict, **kwargs)
153
+
154
+
155
+ class CLIPVisionConfig(PretrainedConfig):
156
+ r"""
157
+ This is the configuration class to store the configuration of a [`CLIPVisionModel`]. It is used to instantiate a
158
+ CLIP vision encoder according to the specified arguments, defining the model architecture. Instantiating a
159
+ configuration with the defaults will yield a similar configuration to that of the vision encoder of the CLIP
160
+ [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.
161
+
162
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
163
+ documentation from [`PretrainedConfig`] for more information.
164
+
165
+ Args:
166
+ hidden_size (`int`, *optional*, defaults to 768):
167
+ Dimensionality of the encoder layers and the pooler layer.
168
+ intermediate_size (`int`, *optional*, defaults to 3072):
169
+ Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
170
+ projection_dim (`int`, *optional*, defaults to 512):
171
+ Dimentionality of text and vision projection layers.
172
+ num_hidden_layers (`int`, *optional*, defaults to 12):
173
+ Number of hidden layers in the Transformer encoder.
174
+ num_attention_heads (`int`, *optional*, defaults to 12):
175
+ Number of attention heads for each attention layer in the Transformer encoder.
176
+ num_channels (`int`, *optional*, defaults to 3):
177
+ The number of input channels.
178
+ image_size (`int`, *optional*, defaults to 224):
179
+ The size (resolution) of each image.
180
+ patch_size (`int`, *optional*, defaults to 32):
181
+ The size (resolution) of each patch.
182
+ hidden_act (`str` or `function`, *optional*, defaults to `"quick_gelu"`):
183
+ The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
184
+ `"relu"`, `"selu"` and `"gelu_new"` ``"quick_gelu"` are supported.
185
+ layer_norm_eps (`float`, *optional*, defaults to 1e-05):
186
+ The epsilon used by the layer normalization layers.
187
+ attention_dropout (`float`, *optional*, defaults to 0.0):
188
+ The dropout ratio for the attention probabilities.
189
+ initializer_range (`float`, *optional*, defaults to 0.02):
190
+ The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
191
+ initializer_factor (`float`, *optional*, defaults to 1.0):
192
+ A factor for initializing all weight matrices (should be kept to 1, used internally for initialization
193
+ testing).
194
+
195
+ Example:
196
+
197
+ ```python
198
+ >>> from transformers import CLIPVisionConfig, CLIPVisionModel
199
+
200
+ >>> # Initializing a CLIPVisionConfig with openai/clip-vit-base-patch32 style configuration
201
+ >>> configuration = CLIPVisionConfig()
202
+
203
+ >>> # Initializing a CLIPVisionModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
204
+ >>> model = CLIPVisionModel(configuration)
205
+
206
+ >>> # Accessing the model configuration
207
+ >>> configuration = model.config
208
+ ```"""
209
+
210
+ model_type = "clip_vision_model"
211
+
212
+ def __init__(
213
+ self,
214
+ hidden_size=768,
215
+ intermediate_size=3072,
216
+ projection_dim=512,
217
+ num_hidden_layers=12,
218
+ num_attention_heads=12,
219
+ num_channels=3,
220
+ image_size=224,
221
+ patch_size=32,
222
+ hidden_act="quick_gelu",
223
+ layer_norm_eps=1e-5,
224
+ attention_dropout=0.0,
225
+ initializer_range=0.02,
226
+ initializer_factor=1.0,
227
+ **kwargs,
228
+ ):
229
+ super().__init__(**kwargs)
230
+
231
+ self.hidden_size = hidden_size
232
+ self.intermediate_size = intermediate_size
233
+ self.projection_dim = projection_dim
234
+ self.num_hidden_layers = num_hidden_layers
235
+ self.num_attention_heads = num_attention_heads
236
+ self.num_channels = num_channels
237
+ self.patch_size = patch_size
238
+ self.image_size = image_size
239
+ self.initializer_range = initializer_range
240
+ self.initializer_factor = initializer_factor
241
+ self.attention_dropout = attention_dropout
242
+ self.layer_norm_eps = layer_norm_eps
243
+ self.hidden_act = hidden_act
244
+
245
+ @classmethod
246
+ def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
247
+ cls._set_token_in_kwargs(kwargs)
248
+
249
+ config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
250
+
251
+ # get the vision config dict if we are loading from CLIPConfig
252
+ if config_dict.get("model_type") == "clip":
253
+ config_dict = config_dict["vision_config"]
254
+
255
+ if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
256
+ logger.warning(
257
+ f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
258
+ f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
259
+ )
260
+
261
+ return cls.from_dict(config_dict, **kwargs)
262
+
263
+
264
+ class CLIPConfig(PretrainedConfig):
265
+ r"""
266
+ [`CLIPConfig`] is the configuration class to store the configuration of a [`CLIPModel`]. It is used to instantiate
267
+ a CLIP model according to the specified arguments, defining the text model and vision model configs. Instantiating
268
+ a configuration with the defaults will yield a similar configuration to that of the CLIP
269
+ [openai/clip-vit-base-patch32](https://huggingface.co/openai/clip-vit-base-patch32) architecture.
270
+
271
+ Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the
272
+ documentation from [`PretrainedConfig`] for more information.
273
+
274
+ Args:
275
+ text_config (`dict`, *optional*):
276
+ Dictionary of configuration options used to initialize [`CLIPTextConfig`].
277
+ vision_config (`dict`, *optional*):
278
+ Dictionary of configuration options used to initialize [`CLIPVisionConfig`].
279
+ projection_dim (`int`, *optional*, defaults to 512):
280
+ Dimentionality of text and vision projection layers.
281
+ logit_scale_init_value (`float`, *optional*, defaults to 2.6592):
282
+ The inital value of the *logit_scale* paramter. Default is used as per the original CLIP implementation.
283
+ kwargs (*optional*):
284
+ Dictionary of keyword arguments.
285
+
286
+ Example:
287
+
288
+ ```python
289
+ >>> from transformers import CLIPConfig, CLIPModel
290
+
291
+ >>> # Initializing a CLIPConfig with openai/clip-vit-base-patch32 style configuration
292
+ >>> configuration = CLIPConfig()
293
+
294
+ >>> # Initializing a CLIPModel (with random weights) from the openai/clip-vit-base-patch32 style configuration
295
+ >>> model = CLIPModel(configuration)
296
+
297
+ >>> # Accessing the model configuration
298
+ >>> configuration = model.config
299
+
300
+ >>> # We can also initialize a CLIPConfig from a CLIPTextConfig and a CLIPVisionConfig
301
+ >>> from transformers import CLIPTextConfig, CLIPVisionConfig
302
+
303
+ >>> # Initializing a CLIPText and CLIPVision configuration
304
+ >>> config_text = CLIPTextConfig()
305
+ >>> config_vision = CLIPVisionConfig()
306
+
307
+ >>> config = CLIPConfig.from_text_vision_configs(config_text, config_vision)
308
+ ```"""
309
+
310
+ model_type = "clip"
311
+
312
+ def __init__(
313
+ self, text_config=None, vision_config=None, projection_dim=512, logit_scale_init_value=2.6592, **kwargs
314
+ ):
315
+ # If `_config_dict` exist, we use them for the backward compatibility.
316
+ # We pop out these 2 attributes before calling `super().__init__` to avoid them being saved (which causes a lot
317
+ # of confusion!).
318
+ text_config_dict = kwargs.pop("text_config_dict", None)
319
+ vision_config_dict = kwargs.pop("vision_config_dict", None)
320
+
321
+ super().__init__(**kwargs)
322
+
323
+ # Instead of simply assigning `[text|vision]_config_dict` to `[text|vision]_config`, we use the values in
324
+ # `[text|vision]_config_dict` to update the values in `[text|vision]_config`. The values should be same in most
325
+ # cases, but we don't want to break anything regarding `_config_dict` that existed before commit `8827e1b2`.
326
+ if text_config_dict is not None:
327
+ if text_config is None:
328
+ text_config = {}
329
+
330
+ # This is the complete result when using `text_config_dict`.
331
+ _text_config_dict = CLIPTextConfig(**text_config_dict).to_dict()
332
+
333
+ # Give a warning if the values exist in both `_text_config_dict` and `text_config` but being different.
334
+ for key, value in _text_config_dict.items():
335
+ if key in text_config and value != text_config[key] and key not in ["transformers_version"]:
336
+ # If specified in `text_config_dict`
337
+ if key in text_config_dict:
338
+ message = (
339
+ f"`{key}` is found in both `text_config_dict` and `text_config` but with different values. "
340
+ f'The value `text_config_dict["{key}"]` will be used instead.'
341
+ )
342
+ # If inferred from default argument values (just to be super careful)
343
+ else:
344
+ message = (
345
+ f"`text_config_dict` is provided which will be used to initialize `CLIPTextConfig`. The "
346
+ f'value `text_config["{key}"]` will be overriden.'
347
+ )
348
+ logger.info(message)
349
+
350
+ # Update all values in `text_config` with the ones in `_text_config_dict`.
351
+ text_config.update(_text_config_dict)
352
+
353
+ if vision_config_dict is not None:
354
+ if vision_config is None:
355
+ vision_config = {}
356
+
357
+ # This is the complete result when using `vision_config_dict`.
358
+ _vision_config_dict = CLIPVisionConfig(**vision_config_dict).to_dict()
359
+ # convert keys to string instead of integer
360
+ if "id2label" in _vision_config_dict:
361
+ _vision_config_dict["id2label"] = {
362
+ str(key): value for key, value in _vision_config_dict["id2label"].items()
363
+ }
364
+
365
+ # Give a warning if the values exist in both `_vision_config_dict` and `vision_config` but being different.
366
+ for key, value in _vision_config_dict.items():
367
+ if key in vision_config and value != vision_config[key] and key not in ["transformers_version"]:
368
+ # If specified in `vision_config_dict`
369
+ if key in vision_config_dict:
370
+ message = (
371
+ f"`{key}` is found in both `vision_config_dict` and `vision_config` but with different "
372
+ f'values. The value `vision_config_dict["{key}"]` will be used instead.'
373
+ )
374
+ # If inferred from default argument values (just to be super careful)
375
+ else:
376
+ message = (
377
+ f"`vision_config_dict` is provided which will be used to initialize `CLIPVisionConfig`. "
378
+ f'The value `vision_config["{key}"]` will be overriden.'
379
+ )
380
+ logger.info(message)
381
+
382
+ # Update all values in `vision_config` with the ones in `_vision_config_dict`.
383
+ vision_config.update(_vision_config_dict)
384
+
385
+ if text_config is None:
386
+ text_config = {}
387
+ logger.info("`text_config` is `None`. Initializing the `CLIPTextConfig` with default values.")
388
+
389
+ if vision_config is None:
390
+ vision_config = {}
391
+ logger.info("`vision_config` is `None`. Initializing the `CLIPVisionConfig` with default values.")
392
+
393
+ self.text_config = CLIPTextConfig(**text_config)
394
+ self.vision_config = CLIPVisionConfig(**vision_config)
395
+
396
+ self.projection_dim = projection_dim
397
+ self.logit_scale_init_value = logit_scale_init_value
398
+ self.initializer_factor = 1.0
399
+
400
+ @classmethod
401
+ def from_text_vision_configs(cls, text_config: CLIPTextConfig, vision_config: CLIPVisionConfig, **kwargs):
402
+ r"""
403
+ Instantiate a [`CLIPConfig`] (or a derived class) from clip text model configuration and clip vision model
404
+ configuration.
405
+
406
+ Returns:
407
+ [`CLIPConfig`]: An instance of a configuration object
408
+ """
409
+
410
+ return cls(text_config=text_config.to_dict(), vision_config=vision_config.to_dict(), **kwargs)
411
+
412
+
413
+ class CLIPOnnxConfig(OnnxConfig):
414
+ @property
415
+ def inputs(self) -> Mapping[str, Mapping[int, str]]:
416
+ return OrderedDict(
417
+ [
418
+ ("input_ids", {0: "batch", 1: "sequence"}),
419
+ ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
420
+ ("attention_mask", {0: "batch", 1: "sequence"}),
421
+ ]
422
+ )
423
+
424
+ @property
425
+ def outputs(self) -> Mapping[str, Mapping[int, str]]:
426
+ return OrderedDict(
427
+ [
428
+ ("logits_per_image", {0: "batch"}),
429
+ ("logits_per_text", {0: "batch"}),
430
+ ("text_embeds", {0: "batch"}),
431
+ ("image_embeds", {0: "batch"}),
432
+ ]
433
+ )
434
+
435
+ @property
436
+ def atol_for_validation(self) -> float:
437
+ return 1e-4
438
+
439
+ def generate_dummy_inputs(
440
+ self,
441
+ processor: "ProcessorMixin",
442
+ batch_size: int = -1,
443
+ seq_length: int = -1,
444
+ framework: Optional["TensorType"] = None,
445
+ ) -> Mapping[str, Any]:
446
+ text_input_dict = super().generate_dummy_inputs(
447
+ processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
448
+ )
449
+ image_input_dict = super().generate_dummy_inputs(
450
+ processor.image_processor, batch_size=batch_size, framework=framework
451
+ )
452
+ return {**text_input_dict, **image_input_dict}
453
+
454
+ @property
455
+ def default_onnx_opset(self) -> int:
456
+ return 14
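
The configuration code above merges any `text_config_dict`/`vision_config_dict` into the corresponding sub-configs and exposes `from_text_vision_configs` as a convenience constructor. A minimal sketch of composing a config that way (the hyperparameter values below are made up purely for illustration):

```python
from transformers import CLIPConfig, CLIPTextConfig, CLIPVisionConfig

# Hypothetical, non-default hyperparameters chosen only to show the round trip.
text_config = CLIPTextConfig(hidden_size=256, num_hidden_layers=4)
vision_config = CLIPVisionConfig(hidden_size=384, num_hidden_layers=6)

# Extra kwargs such as projection_dim are forwarded to CLIPConfig.__init__.
config = CLIPConfig.from_text_vision_configs(text_config, vision_config, projection_dim=256)

print(config.text_config.hidden_size)    # 256
print(config.vision_config.hidden_size)  # 384
print(config.projection_dim)             # 256
```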
venv/lib/python3.10/site-packages/transformers/models/clip/convert_clip_original_pytorch_to_hf.py ADDED
@@ -0,0 +1,148 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import argparse
17
+
18
+ import torch
19
+ from clip import load
20
+
21
+ from transformers import CLIPConfig, CLIPModel
22
+
23
+
24
+ def copy_attn_layer(hf_attn_layer, pt_attn_layer):
25
+ q_proj, k_proj, v_proj = pt_attn_layer.in_proj_weight.chunk(3, dim=0)
26
+ q_proj_bias, k_proj_bias, v_proj_bias = pt_attn_layer.in_proj_bias.chunk(3, dim=0)
27
+
28
+ out_proj_weights = pt_attn_layer.out_proj.weight
29
+ out_proj_bias = pt_attn_layer.out_proj.bias
30
+
31
+ hf_attn_layer.q_proj.weight.data = q_proj
32
+ hf_attn_layer.q_proj.bias.data = q_proj_bias
33
+
34
+ hf_attn_layer.k_proj.weight.data = k_proj
35
+ hf_attn_layer.k_proj.bias.data = k_proj_bias
36
+
37
+ hf_attn_layer.v_proj.weight.data = v_proj
38
+ hf_attn_layer.v_proj.bias.data = v_proj_bias
39
+
40
+ hf_attn_layer.out_proj.weight = out_proj_weights
41
+ hf_attn_layer.out_proj.bias = out_proj_bias
42
+
43
+
44
+ def copy_mlp(hf_mlp, pt_mlp):
45
+ copy_linear(hf_mlp.fc1, pt_mlp.c_fc)
46
+ copy_linear(hf_mlp.fc2, pt_mlp.c_proj)
47
+
48
+
49
+ def copy_linear(hf_linear, pt_linear):
50
+ hf_linear.weight = pt_linear.weight
51
+ hf_linear.bias = pt_linear.bias
52
+
53
+
54
+ def copy_layer(hf_layer, pt_layer):
55
+ # copy layer norms
56
+ copy_linear(hf_layer.layer_norm1, pt_layer.ln_1)
57
+ copy_linear(hf_layer.layer_norm2, pt_layer.ln_2)
58
+
59
+ # copy MLP
60
+ copy_mlp(hf_layer.mlp, pt_layer.mlp)
61
+
62
+ # copy attn
63
+ copy_attn_layer(hf_layer.self_attn, pt_layer.attn)
64
+
65
+
66
+ def copy_layers(hf_layers, pt_layers):
67
+ for hf_layer, pt_layer in zip(hf_layers, pt_layers):
68
+ copy_layer(hf_layer, pt_layer)
69
+
70
+
71
+ def copy_encoder(hf_encoder, pt_model):
72
+ # copy embeds
73
+ hf_encoder.embeddings.token_embedding.weight = pt_model.token_embedding.weight
74
+ hf_encoder.embeddings.position_embedding.weight.data = pt_model.positional_embedding
75
+
76
+ # copy layer norm
77
+ copy_linear(hf_encoder.final_layer_norm, pt_model.ln_final)
78
+
79
+ # copy hidden layers
80
+ copy_layers(hf_encoder.encoder.layers, pt_model.transformer.resblocks)
81
+
82
+
83
+ def copy_text_model_and_projection(hf_model, pt_model):
84
+ # copy projection
85
+ hf_model.text_projection.weight.data = pt_model.text_projection.data.T
86
+
87
+ # copy text encoder
88
+ copy_encoder(hf_model.text_model, pt_model)
89
+
90
+
91
+ def copy_vison_model_and_projection(hf_model, pt_model):
92
+ # copy projection
93
+ hf_model.visual_projection.weight.data = pt_model.visual.proj.data.T
94
+
95
+ # copy layer norms
96
+ copy_linear(hf_model.vision_model.pre_layrnorm, pt_model.visual.ln_pre)
97
+ copy_linear(hf_model.vision_model.post_layernorm, pt_model.visual.ln_post)
98
+
99
+ # copy embeds
100
+ hf_model.vision_model.embeddings.patch_embedding.weight.data = pt_model.visual.conv1.weight.data
101
+ hf_model.vision_model.embeddings.class_embedding = pt_model.visual.class_embedding
102
+ hf_model.vision_model.embeddings.position_embedding.weight.data = pt_model.visual.positional_embedding.data
103
+
104
+ # copy encoder
105
+ copy_layers(hf_model.vision_model.encoder.layers, pt_model.visual.transformer.resblocks)
106
+
107
+
108
+ @torch.no_grad()
109
+ def convert_clip_checkpoint(checkpoint_path, pytorch_dump_folder_path, config_path=None):
110
+ """
111
+ Copy/paste/tweak model's weights to transformers design.
112
+ """
113
+ if config_path is not None:
114
+ config = CLIPConfig.from_pretrained(config_path)
115
+ else:
116
+ config = CLIPConfig(projection_dim=512, text_config={}, vision_config={})
117
+
118
+ hf_model = CLIPModel(config).eval()
119
+
120
+ pt_model, _ = load(checkpoint_path, device="cpu", jit=False)
121
+ pt_model = pt_model.eval()
122
+
123
+ copy_text_model_and_projection(hf_model, pt_model)
124
+ copy_vison_model_and_projection(hf_model, pt_model)
125
+ hf_model.logit_scale = pt_model.logit_scale
126
+
127
+ input_ids = torch.arange(0, 77).unsqueeze(0)
128
+ pixel_values = torch.randn(1, 3, 224, 224)
129
+
130
+ hf_outputs = hf_model(input_ids=input_ids, pixel_values=pixel_values, return_dict=True)
131
+ hf_logits_per_image = hf_outputs.logits_per_image
132
+ hf_logits_per_text = hf_outputs.logits_per_text
133
+ pt_logits_per_image, pt_logits_per_text = pt_model(pixel_values, input_ids)
134
+
135
+ assert torch.allclose(hf_logits_per_image, pt_logits_per_image, atol=1e-3)
136
+ assert torch.allclose(hf_logits_per_text, pt_logits_per_text, atol=1e-3)
137
+
138
+ hf_model.save_pretrained(pytorch_dump_folder_path)
139
+
140
+
141
+ if __name__ == "__main__":
142
+ parser = argparse.ArgumentParser()
143
+ parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
144
+ parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to the original OpenAI CLIP checkpoint")
145
+ parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
146
+ args = parser.parse_args()
147
+
148
+ convert_clip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
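
As a usage note, the script is normally driven through its argparse interface, but the same conversion can be called from Python. A sketch under two assumptions not shown above: the OpenAI `clip` package is installed, and `clip.load` is given either a local `.pt` file or a model name it can fetch (the paths and model name below are placeholders):

```python
# Equivalent CLI call (placeholder paths):
#   python convert_clip_original_pytorch_to_hf.py \
#       --checkpoint_path ViT-B/32 --pytorch_dump_folder_path ./clip-vit-base-patch32-hf

from transformers.models.clip.convert_clip_original_pytorch_to_hf import convert_clip_checkpoint

convert_clip_checkpoint(
    checkpoint_path="ViT-B/32",                         # name or local .pt file accepted by clip.load
    pytorch_dump_folder_path="./clip-vit-base-patch32-hf",
    config_path=None,                                   # falls back to CLIPConfig(projection_dim=512, ...)
)
```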
venv/lib/python3.10/site-packages/transformers/models/clip/feature_extraction_clip.py ADDED
@@ -0,0 +1,33 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Feature extractor class for CLIP."""
16
+
17
+ import warnings
18
+
19
+ from ...utils import logging
20
+ from .image_processing_clip import CLIPImageProcessor
21
+
22
+
23
+ logger = logging.get_logger(__name__)
24
+
25
+
26
+ class CLIPFeatureExtractor(CLIPImageProcessor):
27
+ def __init__(self, *args, **kwargs) -> None:
28
+ warnings.warn(
29
+ "The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
30
+ " use CLIPImageProcessor instead.",
31
+ FutureWarning,
32
+ )
33
+ super().__init__(*args, **kwargs)
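
The class above is only a deprecation shim around `CLIPImageProcessor`. A small sketch of what that shim does in practice, assuming `transformers` is installed:

```python
import warnings

from transformers import CLIPFeatureExtractor, CLIPImageProcessor

# Constructing the deprecated alias emits a FutureWarning...
with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    legacy = CLIPFeatureExtractor()
assert any(issubclass(w.category, FutureWarning) for w in caught)

# ...while the replacement behaves identically, with the same defaults.
processor = CLIPImageProcessor()
assert legacy.size == processor.size == {"shortest_edge": 224}
```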
venv/lib/python3.10/site-packages/transformers/models/clip/image_processing_clip.py ADDED
@@ -0,0 +1,346 @@
1
+ # coding=utf-8
2
+ # Copyright 2022 The HuggingFace Inc. team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Image processor class for CLIP."""
16
+
17
+ from typing import Dict, List, Optional, Union
18
+
19
+ import numpy as np
20
+
21
+ from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
22
+ from ...image_transforms import (
23
+ convert_to_rgb,
24
+ get_resize_output_image_size,
25
+ resize,
26
+ to_channel_dimension_format,
27
+ )
28
+ from ...image_utils import (
29
+ OPENAI_CLIP_MEAN,
30
+ OPENAI_CLIP_STD,
31
+ ChannelDimension,
32
+ ImageInput,
33
+ PILImageResampling,
34
+ infer_channel_dimension_format,
35
+ is_scaled_image,
36
+ make_list_of_images,
37
+ to_numpy_array,
38
+ valid_images,
39
+ validate_kwargs,
40
+ validate_preprocess_arguments,
41
+ )
42
+ from ...utils import TensorType, is_vision_available, logging
43
+
44
+
45
+ logger = logging.get_logger(__name__)
46
+
47
+
48
+ if is_vision_available():
49
+ import PIL
50
+
51
+
52
+ class CLIPImageProcessor(BaseImageProcessor):
53
+ r"""
54
+ Constructs a CLIP image processor.
55
+
56
+ Args:
57
+ do_resize (`bool`, *optional*, defaults to `True`):
58
+ Whether to resize the image's (height, width) dimensions to the specified `size`. Can be overridden by
59
+ `do_resize` in the `preprocess` method.
60
+ size (`Dict[str, int]` *optional*, defaults to `{"shortest_edge": 224}`):
61
+ Size of the image after resizing. The shortest edge of the image is resized to size["shortest_edge"], with
62
+ the longest edge resized to keep the input aspect ratio. Can be overridden by `size` in the `preprocess`
63
+ method.
64
+ resample (`PILImageResampling`, *optional*, defaults to `Resampling.BICUBIC`):
65
+ Resampling filter to use if resizing the image. Can be overridden by `resample` in the `preprocess` method.
66
+ do_center_crop (`bool`, *optional*, defaults to `True`):
67
+ Whether to center crop the image to the specified `crop_size`. Can be overridden by `do_center_crop` in the
68
+ `preprocess` method.
69
+ crop_size (`Dict[str, int]` *optional*, defaults to 224):
70
+ Size of the output image after applying `center_crop`. Can be overridden by `crop_size` in the `preprocess`
71
+ method.
72
+ do_rescale (`bool`, *optional*, defaults to `True`):
73
+ Whether to rescale the image by the specified scale `rescale_factor`. Can be overridden by `do_rescale` in
74
+ the `preprocess` method.
75
+ rescale_factor (`int` or `float`, *optional*, defaults to `1/255`):
76
+ Scale factor to use if rescaling the image. Can be overridden by `rescale_factor` in the `preprocess`
77
+ method.
78
+ do_normalize (`bool`, *optional*, defaults to `True`):
79
+ Whether to normalize the image. Can be overridden by `do_normalize` in the `preprocess` method.
80
+ image_mean (`float` or `List[float]`, *optional*, defaults to `[0.48145466, 0.4578275, 0.40821073]`):
81
+ Mean to use if normalizing the image. This is a float or list of floats the length of the number of
82
+ channels in the image. Can be overridden by the `image_mean` parameter in the `preprocess` method.
83
+ image_std (`float` or `List[float]`, *optional*, defaults to `[0.26862954, 0.26130258, 0.27577711]`):
84
+ Standard deviation to use if normalizing the image. This is a float or list of floats the length of the
85
+ number of channels in the image. Can be overridden by the `image_std` parameter in the `preprocess` method.
87
+ do_convert_rgb (`bool`, *optional*, defaults to `True`):
88
+ Whether to convert the image to RGB.
89
+ """
90
+
91
+ model_input_names = ["pixel_values"]
92
+
93
+ def __init__(
94
+ self,
95
+ do_resize: bool = True,
96
+ size: Dict[str, int] = None,
97
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
98
+ do_center_crop: bool = True,
99
+ crop_size: Dict[str, int] = None,
100
+ do_rescale: bool = True,
101
+ rescale_factor: Union[int, float] = 1 / 255,
102
+ do_normalize: bool = True,
103
+ image_mean: Optional[Union[float, List[float]]] = None,
104
+ image_std: Optional[Union[float, List[float]]] = None,
105
+ do_convert_rgb: bool = True,
106
+ **kwargs,
107
+ ) -> None:
108
+ super().__init__(**kwargs)
109
+ size = size if size is not None else {"shortest_edge": 224}
110
+ size = get_size_dict(size, default_to_square=False)
111
+ crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
112
+ crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")
113
+
114
+ self.do_resize = do_resize
115
+ self.size = size
116
+ self.resample = resample
117
+ self.do_center_crop = do_center_crop
118
+ self.crop_size = crop_size
119
+ self.do_rescale = do_rescale
120
+ self.rescale_factor = rescale_factor
121
+ self.do_normalize = do_normalize
122
+ self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
123
+ self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
124
+ self.do_convert_rgb = do_convert_rgb
125
+ self._valid_processor_keys = [
126
+ "images",
127
+ "do_resize",
128
+ "size",
129
+ "resample",
130
+ "do_center_crop",
131
+ "crop_size",
132
+ "do_rescale",
133
+ "rescale_factor",
134
+ "do_normalize",
135
+ "image_mean",
136
+ "image_std",
137
+ "do_convert_rgb",
138
+ "return_tensors",
139
+ "data_format",
140
+ "input_data_format",
141
+ ]
142
+
143
+ # for backwards compatibility of KOSMOS-2
144
+ if "use_square_size" in kwargs:
145
+ self.size = {"height": size["shortest_edge"], "width": size["shortest_edge"]}
146
+
147
+ def resize(
148
+ self,
149
+ image: np.ndarray,
150
+ size: Dict[str, int],
151
+ resample: PILImageResampling = PILImageResampling.BICUBIC,
152
+ data_format: Optional[Union[str, ChannelDimension]] = None,
153
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
154
+ **kwargs,
155
+ ) -> np.ndarray:
156
+ """
157
+ Resize an image. The shortest edge of the image is resized to size["shortest_edge"], with the longest edge
158
+ resized to keep the input aspect ratio.
159
+
160
+ Args:
161
+ image (`np.ndarray`):
162
+ Image to resize.
163
+ size (`Dict[str, int]`):
164
+ Size of the output image.
165
+ resample (`PILImageResampling`, *optional*, defaults to `PILImageResampling.BICUBIC`):
166
+ Resampling filter to use when resizing the image.
167
+ data_format (`str` or `ChannelDimension`, *optional*):
168
+ The channel dimension format of the image. If not provided, it will be the same as the input image.
169
+ input_data_format (`ChannelDimension` or `str`, *optional*):
170
+ The channel dimension format of the input image. If not provided, it will be inferred.
171
+ """
172
+ default_to_square = True
173
+ if "shortest_edge" in size:
174
+ size = size["shortest_edge"]
175
+ default_to_square = False
176
+ elif "height" in size and "width" in size:
177
+ size = (size["height"], size["width"])
178
+ else:
179
+ raise ValueError("Size must contain either 'shortest_edge' or 'height' and 'width'.")
180
+
181
+ output_size = get_resize_output_image_size(
182
+ image,
183
+ size=size,
184
+ default_to_square=default_to_square,
185
+ input_data_format=input_data_format,
186
+ )
187
+ return resize(
188
+ image,
189
+ size=output_size,
190
+ resample=resample,
191
+ data_format=data_format,
192
+ input_data_format=input_data_format,
193
+ **kwargs,
194
+ )
195
+
196
+ def preprocess(
197
+ self,
198
+ images: ImageInput,
199
+ do_resize: bool = None,
200
+ size: Dict[str, int] = None,
201
+ resample: PILImageResampling = None,
202
+ do_center_crop: bool = None,
203
+ crop_size: int = None,
204
+ do_rescale: bool = None,
205
+ rescale_factor: float = None,
206
+ do_normalize: bool = None,
207
+ image_mean: Optional[Union[float, List[float]]] = None,
208
+ image_std: Optional[Union[float, List[float]]] = None,
209
+ do_convert_rgb: bool = None,
210
+ return_tensors: Optional[Union[str, TensorType]] = None,
211
+ data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
212
+ input_data_format: Optional[Union[str, ChannelDimension]] = None,
213
+ **kwargs,
214
+ ) -> PIL.Image.Image:
215
+ """
216
+ Preprocess an image or batch of images.
217
+
218
+ Args:
219
+ images (`ImageInput`):
220
+ Image to preprocess. Expects a single or batch of images with pixel values ranging from 0 to 255. If
221
+ passing in images with pixel values between 0 and 1, set `do_rescale=False`.
222
+ do_resize (`bool`, *optional*, defaults to `self.do_resize`):
223
+ Whether to resize the image.
224
+ size (`Dict[str, int]`, *optional*, defaults to `self.size`):
225
+ Size of the image after resizing. Shortest edge of the image is resized to size["shortest_edge"], with
226
+ the longest edge resized to keep the input aspect ratio.
227
+ resample (`int`, *optional*, defaults to `self.resample`):
228
+ Resampling filter to use if resizing the image. This can be one of the enum `PILImageResampling`. Only
229
+ has an effect if `do_resize` is set to `True`.
230
+ do_center_crop (`bool`, *optional*, defaults to `self.do_center_crop`):
231
+ Whether to center crop the image.
232
+ crop_size (`Dict[str, int]`, *optional*, defaults to `self.crop_size`):
233
+ Size of the center crop. Only has an effect if `do_center_crop` is set to `True`.
234
+ do_rescale (`bool`, *optional*, defaults to `self.do_rescale`):
235
+ Whether to rescale the image.
236
+ rescale_factor (`float`, *optional*, defaults to `self.rescale_factor`):
237
+ Rescale factor to rescale the image by if `do_rescale` is set to `True`.
238
+ do_normalize (`bool`, *optional*, defaults to `self.do_normalize`):
239
+ Whether to normalize the image.
240
+ image_mean (`float` or `List[float]`, *optional*, defaults to `self.image_mean`):
241
+ Image mean to use for normalization. Only has an effect if `do_normalize` is set to `True`.
242
+ image_std (`float` or `List[float]`, *optional*, defaults to `self.image_std`):
243
+ Image standard deviation to use for normalization. Only has an effect if `do_normalize` is set to
244
+ `True`.
245
+ do_convert_rgb (`bool`, *optional*, defaults to `self.do_convert_rgb`):
246
+ Whether to convert the image to RGB.
247
+ return_tensors (`str` or `TensorType`, *optional*):
248
+ The type of tensors to return. Can be one of:
249
+ - Unset: Return a list of `np.ndarray`.
250
+ - `TensorType.TENSORFLOW` or `'tf'`: Return a batch of type `tf.Tensor`.
251
+ - `TensorType.PYTORCH` or `'pt'`: Return a batch of type `torch.Tensor`.
252
+ - `TensorType.NUMPY` or `'np'`: Return a batch of type `np.ndarray`.
253
+ - `TensorType.JAX` or `'jax'`: Return a batch of type `jax.numpy.ndarray`.
254
+ data_format (`ChannelDimension` or `str`, *optional*, defaults to `ChannelDimension.FIRST`):
255
+ The channel dimension format for the output image. Can be one of:
256
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
257
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
258
+ - Unset: Use the channel dimension format of the input image.
259
+ input_data_format (`ChannelDimension` or `str`, *optional*):
260
+ The channel dimension format for the input image. If unset, the channel dimension format is inferred
261
+ from the input image. Can be one of:
262
+ - `"channels_first"` or `ChannelDimension.FIRST`: image in (num_channels, height, width) format.
263
+ - `"channels_last"` or `ChannelDimension.LAST`: image in (height, width, num_channels) format.
264
+ - `"none"` or `ChannelDimension.NONE`: image in (height, width) format.
265
+ """
266
+ do_resize = do_resize if do_resize is not None else self.do_resize
267
+ size = size if size is not None else self.size
268
+ size = get_size_dict(size, param_name="size", default_to_square=False)
269
+ resample = resample if resample is not None else self.resample
270
+ do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
271
+ crop_size = crop_size if crop_size is not None else self.crop_size
272
+ crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
273
+ do_rescale = do_rescale if do_rescale is not None else self.do_rescale
274
+ rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
275
+ do_normalize = do_normalize if do_normalize is not None else self.do_normalize
276
+ image_mean = image_mean if image_mean is not None else self.image_mean
277
+ image_std = image_std if image_std is not None else self.image_std
278
+ do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
279
+
280
+ validate_kwargs(captured_kwargs=kwargs.keys(), valid_processor_keys=self._valid_processor_keys)
281
+
282
+ images = make_list_of_images(images)
283
+
284
+ if not valid_images(images):
285
+ raise ValueError(
286
+ "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
287
+ "torch.Tensor, tf.Tensor or jax.ndarray."
288
+ )
289
+ validate_preprocess_arguments(
290
+ do_rescale=do_rescale,
291
+ rescale_factor=rescale_factor,
292
+ do_normalize=do_normalize,
293
+ image_mean=image_mean,
294
+ image_std=image_std,
295
+ do_center_crop=do_center_crop,
296
+ crop_size=crop_size,
297
+ do_resize=do_resize,
298
+ size=size,
299
+ resample=resample,
300
+ )
301
+
302
+ if do_convert_rgb:
303
+ images = [convert_to_rgb(image) for image in images]
304
+
305
+ # All transformations expect numpy arrays.
306
+ images = [to_numpy_array(image) for image in images]
307
+
308
+ if is_scaled_image(images[0]) and do_rescale:
309
+ logger.warning_once(
310
+ "It looks like you are trying to rescale already rescaled images. If the input"
311
+ " images have pixel values between 0 and 1, set `do_rescale=False` to avoid rescaling them again."
312
+ )
313
+
314
+ if input_data_format is None:
315
+ # We assume that all images have the same channel dimension format.
316
+ input_data_format = infer_channel_dimension_format(images[0])
317
+
318
+ if do_resize:
319
+ images = [
320
+ self.resize(image=image, size=size, resample=resample, input_data_format=input_data_format)
321
+ for image in images
322
+ ]
323
+
324
+ if do_center_crop:
325
+ images = [
326
+ self.center_crop(image=image, size=crop_size, input_data_format=input_data_format) for image in images
327
+ ]
328
+
329
+ if do_rescale:
330
+ images = [
331
+ self.rescale(image=image, scale=rescale_factor, input_data_format=input_data_format)
332
+ for image in images
333
+ ]
334
+
335
+ if do_normalize:
336
+ images = [
337
+ self.normalize(image=image, mean=image_mean, std=image_std, input_data_format=input_data_format)
338
+ for image in images
339
+ ]
340
+
341
+ images = [
342
+ to_channel_dimension_format(image, data_format, input_channel_dim=input_data_format) for image in images
343
+ ]
344
+
345
+ data = {"pixel_values": images}
346
+ return BatchFeature(data=data, tensor_type=return_tensors)
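
A minimal end-to-end sketch of the preprocessing pipeline defined above, using a synthetic array instead of a real photo so nothing has to be downloaded (NumPy and Pillow are assumed to be installed; the pixel values are random):

```python
import numpy as np

from transformers import CLIPImageProcessor

processor = CLIPImageProcessor()  # defaults: resize shortest edge to 224, center-crop 224x224, rescale, normalize

# A synthetic HWC uint8 "image"; PIL images, NumPy arrays and torch/tf tensors are all accepted.
image = np.random.randint(0, 256, size=(480, 640, 3), dtype=np.uint8)

batch = processor(images=image, return_tensors="np")
print(batch["pixel_values"].shape)  # (1, 3, 224, 224): channels-first, cropped to crop_size
```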
venv/lib/python3.10/site-packages/transformers/models/clip/modeling_clip.py ADDED
@@ -0,0 +1,1416 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The OpenAI Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ PyTorch CLIP model."""
16
+
17
+
18
+ from dataclasses import dataclass
19
+ from typing import Any, Optional, Tuple, Union
20
+
21
+ import torch
22
+ import torch.utils.checkpoint
23
+ from torch import nn
24
+ from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
25
+
26
+ from ...activations import ACT2FN
27
+ from ...modeling_attn_mask_utils import _create_4d_causal_attention_mask, _prepare_4d_attention_mask
28
+ from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
29
+ from ...modeling_utils import PreTrainedModel
30
+ from ...utils import (
31
+ ModelOutput,
32
+ add_code_sample_docstrings,
33
+ add_start_docstrings,
34
+ add_start_docstrings_to_model_forward,
35
+ logging,
36
+ replace_return_docstrings,
37
+ )
38
+ from .configuration_clip import CLIPConfig, CLIPTextConfig, CLIPVisionConfig
39
+
40
+
41
+ logger = logging.get_logger(__name__)
42
+
43
+ # General docstring
44
+ _CONFIG_FOR_DOC = "CLIPConfig"
45
+ _CHECKPOINT_FOR_DOC = "openai/clip-vit-base-patch32"
46
+
47
+ # Image classification docstring
48
+ _IMAGE_CLASS_CHECKPOINT = "openai/clip-vit-base-patch32"
49
+ _IMAGE_CLASS_EXPECTED_OUTPUT = "LABEL_0"
50
+
51
+
52
+ from ..deprecated._archive_maps import CLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
53
+
54
+
55
+ # contrastive loss function, adapted from
56
+ # https://sachinruk.github.io/blog/2021-03-07-clip.html
57
+ def contrastive_loss(logits: torch.Tensor) -> torch.Tensor:
58
+ return nn.functional.cross_entropy(logits, torch.arange(len(logits), device=logits.device))
59
+
60
+
61
+ def clip_loss(similarity: torch.Tensor) -> torch.Tensor:
62
+ caption_loss = contrastive_loss(similarity)
63
+ image_loss = contrastive_loss(similarity.t())
64
+ return (caption_loss + image_loss) / 2.0
65
+
66
+
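
The two helpers above implement CLIP's symmetric contrastive objective: cross-entropy over a similarity matrix whose diagonal holds the matching image-text pairs, averaged over both directions. A quick standalone sanity check of that behaviour (random numbers, not part of the model code):

```python
import torch
import torch.nn.functional as F

# 4 matched image/text pairs -> a 4x4 similarity matrix; boosting the diagonal
# makes the "correct" pairings dominate, so the symmetric loss should be near zero.
similarity = 0.1 * torch.randn(4, 4) + 10.0 * torch.eye(4)

caption_loss = F.cross_entropy(similarity, torch.arange(4))
image_loss = F.cross_entropy(similarity.t(), torch.arange(4))
loss = (caption_loss + image_loss) / 2.0
print(float(loss))  # small, because the diagonal dominates both row- and column-wise
```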
67
+ @dataclass
68
+ class CLIPVisionModelOutput(ModelOutput):
69
+ """
70
+ Base class for vision model's outputs that also contains the image embeddings obtained by pooling the last hidden states.
71
+
72
+ Args:
73
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
74
+ The image embeddings obtained by applying the projection layer to the pooler_output.
75
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
76
+ Sequence of hidden-states at the output of the last layer of the model.
77
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
78
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
79
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
80
+
81
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
82
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
83
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
84
+ sequence_length)`.
85
+
86
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
87
+ heads.
88
+ """
89
+
90
+ image_embeds: Optional[torch.FloatTensor] = None
91
+ last_hidden_state: torch.FloatTensor = None
92
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
93
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
94
+
95
+
96
+ @dataclass
97
+ class CLIPTextModelOutput(ModelOutput):
98
+ """
99
+ Base class for text model's outputs that also contains a pooling of the last hidden states.
100
+
101
+ Args:
102
+ text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)` *optional* returned when model is initialized with `with_projection=True`):
103
+ The text embeddings obtained by applying the projection layer to the pooler_output.
104
+ last_hidden_state (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
105
+ Sequence of hidden-states at the output of the last layer of the model.
106
+ hidden_states (`tuple(torch.FloatTensor)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
107
+ Tuple of `torch.FloatTensor` (one for the output of the embeddings, if the model has an embedding layer, +
108
+ one for the output of each layer) of shape `(batch_size, sequence_length, hidden_size)`.
109
+
110
+ Hidden-states of the model at the output of each layer plus the optional initial embedding outputs.
111
+ attentions (`tuple(torch.FloatTensor)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
112
+ Tuple of `torch.FloatTensor` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
113
+ sequence_length)`.
114
+
115
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
116
+ heads.
117
+ """
118
+
119
+ text_embeds: Optional[torch.FloatTensor] = None
120
+ last_hidden_state: torch.FloatTensor = None
121
+ hidden_states: Optional[Tuple[torch.FloatTensor, ...]] = None
122
+ attentions: Optional[Tuple[torch.FloatTensor, ...]] = None
123
+
124
+
125
+ @dataclass
126
+ class CLIPOutput(ModelOutput):
127
+ """
128
+ Args:
129
+ loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
130
+ Contrastive loss for image-text similarity.
131
+ logits_per_image:(`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
132
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
133
+ similarity scores.
134
+ logits_per_text:(`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
135
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
136
+ similarity scores.
137
+ text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
138
+ The text embeddings obtained by applying the projection layer to the pooled output of [`CLIPTextModel`].
139
+ image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
140
+ The image embeddings obtained by applying the projection layer to the pooled output of [`CLIPVisionModel`].
141
+ text_model_output(`BaseModelOutputWithPooling`):
142
+ The output of the [`CLIPTextModel`].
143
+ vision_model_output(`BaseModelOutputWithPooling`):
144
+ The output of the [`CLIPVisionModel`].
145
+ """
146
+
147
+ loss: Optional[torch.FloatTensor] = None
148
+ logits_per_image: torch.FloatTensor = None
149
+ logits_per_text: torch.FloatTensor = None
150
+ text_embeds: torch.FloatTensor = None
151
+ image_embeds: torch.FloatTensor = None
152
+ text_model_output: BaseModelOutputWithPooling = None
153
+ vision_model_output: BaseModelOutputWithPooling = None
154
+
155
+ def to_tuple(self) -> Tuple[Any]:
156
+ return tuple(
157
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
158
+ for k in self.keys()
159
+ )
160
+
161
+
162
+ class CLIPVisionEmbeddings(nn.Module):
163
+ def __init__(self, config: CLIPVisionConfig):
164
+ super().__init__()
165
+ self.config = config
166
+ self.embed_dim = config.hidden_size
167
+ self.image_size = config.image_size
168
+ self.patch_size = config.patch_size
169
+
170
+ self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
171
+
172
+ self.patch_embedding = nn.Conv2d(
173
+ in_channels=config.num_channels,
174
+ out_channels=self.embed_dim,
175
+ kernel_size=self.patch_size,
176
+ stride=self.patch_size,
177
+ bias=False,
178
+ )
179
+
180
+ self.num_patches = (self.image_size // self.patch_size) ** 2
181
+ self.num_positions = self.num_patches + 1
182
+ self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
183
+ self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
184
+
185
+ def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
186
+ batch_size = pixel_values.shape[0]
187
+ target_dtype = self.patch_embedding.weight.dtype
188
+ patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype)) # shape = [*, width, grid, grid]
189
+ patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
190
+
191
+ class_embeds = self.class_embedding.expand(batch_size, 1, -1)
192
+ embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
193
+ embeddings = embeddings + self.position_embedding(self.position_ids)
194
+ return embeddings
195
+
196
+
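
For orientation, the patch/position bookkeeping in `CLIPVisionEmbeddings` above is plain integer arithmetic on the config; a quick check with ViT-B/32-style defaults (224-pixel images, 32-pixel patches):

```python
image_size, patch_size = 224, 32
num_patches = (image_size // patch_size) ** 2  # 7 * 7 = 49 patch tokens
num_positions = num_patches + 1                # +1 for the class-embedding slot
print(num_patches, num_positions)              # 49 50
```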
197
+ class CLIPTextEmbeddings(nn.Module):
198
+ def __init__(self, config: CLIPTextConfig):
199
+ super().__init__()
200
+ embed_dim = config.hidden_size
201
+
202
+ self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
203
+ self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)
204
+
205
+ # position_ids (1, len position emb) is contiguous in memory and exported when serialized
206
+ self.register_buffer(
207
+ "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
208
+ )
209
+
210
+ def forward(
211
+ self,
212
+ input_ids: Optional[torch.LongTensor] = None,
213
+ position_ids: Optional[torch.LongTensor] = None,
214
+ inputs_embeds: Optional[torch.FloatTensor] = None,
215
+ ) -> torch.Tensor:
216
+ seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
217
+
218
+ if position_ids is None:
219
+ position_ids = self.position_ids[:, :seq_length]
220
+
221
+ if inputs_embeds is None:
222
+ inputs_embeds = self.token_embedding(input_ids)
223
+
224
+ position_embeddings = self.position_embedding(position_ids)
225
+ embeddings = inputs_embeds + position_embeddings
226
+
227
+ return embeddings
228
+
229
+
230
+ class CLIPAttention(nn.Module):
231
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
232
+
233
+ def __init__(self, config):
234
+ super().__init__()
235
+ self.config = config
236
+ self.embed_dim = config.hidden_size
237
+ self.num_heads = config.num_attention_heads
238
+ self.head_dim = self.embed_dim // self.num_heads
239
+ if self.head_dim * self.num_heads != self.embed_dim:
240
+ raise ValueError(
241
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
242
+ f" {self.num_heads})."
243
+ )
244
+ self.scale = self.head_dim**-0.5
245
+ self.dropout = config.attention_dropout
246
+
247
+ self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
248
+ self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
249
+ self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
250
+ self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)
251
+
252
+ def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
253
+ return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
254
+
255
+ def forward(
256
+ self,
257
+ hidden_states: torch.Tensor,
258
+ attention_mask: Optional[torch.Tensor] = None,
259
+ causal_attention_mask: Optional[torch.Tensor] = None,
260
+ output_attentions: Optional[bool] = False,
261
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor], Optional[Tuple[torch.Tensor]]]:
262
+ """Input shape: Batch x Time x Channel"""
263
+
264
+ bsz, tgt_len, embed_dim = hidden_states.size()
265
+
266
+ # get query proj
267
+ query_states = self.q_proj(hidden_states) * self.scale
268
+ key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
269
+ value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
270
+
271
+ proj_shape = (bsz * self.num_heads, -1, self.head_dim)
272
+ query_states = self._shape(query_states, tgt_len, bsz).view(*proj_shape)
273
+ key_states = key_states.view(*proj_shape)
274
+ value_states = value_states.view(*proj_shape)
275
+
276
+ src_len = key_states.size(1)
277
+ attn_weights = torch.bmm(query_states, key_states.transpose(1, 2))
278
+
279
+ if attn_weights.size() != (bsz * self.num_heads, tgt_len, src_len):
280
+ raise ValueError(
281
+ f"Attention weights should be of size {(bsz * self.num_heads, tgt_len, src_len)}, but is"
282
+ f" {attn_weights.size()}"
283
+ )
284
+
285
+ # apply the causal_attention_mask first
286
+ if causal_attention_mask is not None:
287
+ if causal_attention_mask.size() != (bsz, 1, tgt_len, src_len):
288
+ raise ValueError(
289
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is"
290
+ f" {causal_attention_mask.size()}"
291
+ )
292
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + causal_attention_mask
293
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
294
+
295
+ if attention_mask is not None:
296
+ if attention_mask.size() != (bsz, 1, tgt_len, src_len):
297
+ raise ValueError(
298
+ f"Attention mask should be of size {(bsz, 1, tgt_len, src_len)}, but is {attention_mask.size()}"
299
+ )
300
+ attn_weights = attn_weights.view(bsz, self.num_heads, tgt_len, src_len) + attention_mask
301
+ attn_weights = attn_weights.view(bsz * self.num_heads, tgt_len, src_len)
302
+
303
+ attn_weights = nn.functional.softmax(attn_weights, dim=-1)
304
+
305
+ if output_attentions:
306
+ # this operation is a bit awkward, but it's required to
307
+ # make sure that attn_weights keeps its gradient.
308
+ # In order to do so, attn_weights have to be reshaped
309
+ # twice and have to be reused in the following
310
+ attn_weights_reshaped = attn_weights.view(bsz, self.num_heads, tgt_len, src_len)
311
+ attn_weights = attn_weights_reshaped.view(bsz * self.num_heads, tgt_len, src_len)
312
+ else:
313
+ attn_weights_reshaped = None
314
+
315
+ attn_probs = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
316
+
317
+ attn_output = torch.bmm(attn_probs, value_states)
318
+
319
+ if attn_output.size() != (bsz * self.num_heads, tgt_len, self.head_dim):
320
+ raise ValueError(
321
+ f"`attn_output` should be of size {(bsz, self.num_heads, tgt_len, self.head_dim)}, but is"
322
+ f" {attn_output.size()}"
323
+ )
324
+
325
+ attn_output = attn_output.view(bsz, self.num_heads, tgt_len, self.head_dim)
326
+ attn_output = attn_output.transpose(1, 2)
327
+ attn_output = attn_output.reshape(bsz, tgt_len, embed_dim)
328
+
329
+ attn_output = self.out_proj(attn_output)
330
+
331
+ return attn_output, attn_weights_reshaped
332
+
333
+
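
The `_shape` helper and the `proj_shape` views in `CLIPAttention.forward` above only move tensors between `(batch, seq, embed)` and `(batch * heads, seq, head_dim)` so that a single batched matmul covers every head. A standalone sketch of that bookkeeping with toy sizes (all values arbitrary):

```python
import torch

bsz, seq_len, num_heads, head_dim = 2, 5, 8, 64
embed_dim = num_heads * head_dim

x = torch.randn(bsz, seq_len, embed_dim)

# (bsz, seq, embed) -> (bsz, heads, seq, head_dim) -> (bsz * heads, seq, head_dim)
heads_first = x.view(bsz, seq_len, num_heads, head_dim).transpose(1, 2).contiguous()
flat = heads_first.view(bsz * num_heads, seq_len, head_dim)

# one batched matmul over all heads at once, as in the forward pass above
attn = torch.bmm(flat, flat.transpose(1, 2)).softmax(dim=-1)  # (bsz*heads, seq, seq)
out = torch.bmm(attn, flat)                                   # (bsz*heads, seq, head_dim)

# ...and back to (bsz, seq, embed) before out_proj would be applied
out = out.view(bsz, num_heads, seq_len, head_dim).transpose(1, 2).reshape(bsz, seq_len, embed_dim)
print(out.shape)  # torch.Size([2, 5, 512])
```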
334
+ class CLIPMLP(nn.Module):
335
+ def __init__(self, config):
336
+ super().__init__()
337
+ self.config = config
338
+ self.activation_fn = ACT2FN[config.hidden_act]
339
+ self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
340
+ self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)
341
+
342
+ def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
343
+ hidden_states = self.fc1(hidden_states)
344
+ hidden_states = self.activation_fn(hidden_states)
345
+ hidden_states = self.fc2(hidden_states)
346
+ return hidden_states
347
+
348
+
349
+ class CLIPEncoderLayer(nn.Module):
350
+ def __init__(self, config: CLIPConfig):
351
+ super().__init__()
352
+ self.embed_dim = config.hidden_size
353
+ self.self_attn = CLIPAttention(config)
354
+ self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
355
+ self.mlp = CLIPMLP(config)
356
+ self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
357
+
358
+ def forward(
359
+ self,
360
+ hidden_states: torch.Tensor,
361
+ attention_mask: torch.Tensor,
362
+ causal_attention_mask: torch.Tensor,
363
+ output_attentions: Optional[bool] = False,
364
+ ) -> Tuple[torch.FloatTensor]:
365
+ """
366
+ Args:
367
+ hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
368
+ attention_mask (`torch.FloatTensor`): attention mask of size
369
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
371
+ output_attentions (`bool`, *optional*):
372
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
373
+ returned tensors for more detail.
374
+ """
375
+ residual = hidden_states
376
+
377
+ hidden_states = self.layer_norm1(hidden_states)
378
+ hidden_states, attn_weights = self.self_attn(
379
+ hidden_states=hidden_states,
380
+ attention_mask=attention_mask,
381
+ causal_attention_mask=causal_attention_mask,
382
+ output_attentions=output_attentions,
383
+ )
384
+ hidden_states = residual + hidden_states
385
+
386
+ residual = hidden_states
387
+ hidden_states = self.layer_norm2(hidden_states)
388
+ hidden_states = self.mlp(hidden_states)
389
+ hidden_states = residual + hidden_states
390
+
391
+ outputs = (hidden_states,)
392
+
393
+ if output_attentions:
394
+ outputs += (attn_weights,)
395
+
396
+ return outputs
397
+
398
+
399
+ class CLIPPreTrainedModel(PreTrainedModel):
400
+ """
401
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
402
+ models.
403
+ """
404
+
405
+ config_class = CLIPConfig
406
+ base_model_prefix = "clip"
407
+ supports_gradient_checkpointing = True
408
+
409
+ def _init_weights(self, module):
410
+ """Initialize the weights"""
411
+ factor = self.config.initializer_factor
412
+ if isinstance(module, CLIPTextEmbeddings):
413
+ module.token_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
414
+ module.position_embedding.weight.data.normal_(mean=0.0, std=factor * 0.02)
415
+ elif isinstance(module, CLIPVisionEmbeddings):
416
+ factor = self.config.initializer_factor
417
+ nn.init.normal_(module.class_embedding, mean=0.0, std=module.embed_dim**-0.5 * factor)
418
+ nn.init.normal_(module.patch_embedding.weight, std=module.config.initializer_range * factor)
419
+ nn.init.normal_(module.position_embedding.weight, std=module.config.initializer_range * factor)
420
+ elif isinstance(module, CLIPAttention):
421
+ factor = self.config.initializer_factor
422
+ in_proj_std = (module.embed_dim**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
423
+ out_proj_std = (module.embed_dim**-0.5) * factor
424
+ nn.init.normal_(module.q_proj.weight, std=in_proj_std)
425
+ nn.init.normal_(module.k_proj.weight, std=in_proj_std)
426
+ nn.init.normal_(module.v_proj.weight, std=in_proj_std)
427
+ nn.init.normal_(module.out_proj.weight, std=out_proj_std)
428
+ elif isinstance(module, CLIPMLP):
429
+ factor = self.config.initializer_factor
430
+ in_proj_std = (module.config.hidden_size**-0.5) * ((2 * module.config.num_hidden_layers) ** -0.5) * factor
431
+ fc_std = (2 * module.config.hidden_size) ** -0.5 * factor
432
+ nn.init.normal_(module.fc1.weight, std=fc_std)
433
+ nn.init.normal_(module.fc2.weight, std=in_proj_std)
434
+ elif isinstance(module, CLIPModel):
435
+ nn.init.normal_(
436
+ module.text_projection.weight,
437
+ std=module.text_embed_dim**-0.5 * self.config.initializer_factor,
438
+ )
439
+ nn.init.normal_(
440
+ module.visual_projection.weight,
441
+ std=module.vision_embed_dim**-0.5 * self.config.initializer_factor,
442
+ )
443
+ elif isinstance(module, CLIPVisionModelWithProjection):
444
+ nn.init.normal_(
445
+ module.visual_projection.weight,
446
+ std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
447
+ )
448
+ elif isinstance(module, CLIPTextModelWithProjection):
449
+ nn.init.normal_(
450
+ module.text_projection.weight,
451
+ std=self.config.hidden_size**-0.5 * self.config.initializer_factor,
452
+ )
453
+
454
+ if isinstance(module, nn.LayerNorm):
455
+ module.bias.data.zero_()
456
+ module.weight.data.fill_(1.0)
457
+ if isinstance(module, nn.Linear) and module.bias is not None:
458
+ module.bias.data.zero_()
459
+
460
+
461
+ CLIP_START_DOCSTRING = r"""
462
+ This model inherits from [`PreTrainedModel`]. Check the superclass documentation for the generic methods the
463
+ library implements for all its model (such as downloading or saving, resizing the input embeddings, pruning heads
464
+ etc.)
465
+
466
+ This model is also a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass.
467
+ Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage
468
+ and behavior.
469
+
470
+ Parameters:
471
+ config ([`CLIPConfig`]): Model configuration class with all the parameters of the model.
472
+ Initializing with a config file does not load the weights associated with the model, only the
473
+ configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
474
+ """
475
+
476
+ CLIP_TEXT_INPUTS_DOCSTRING = r"""
477
+ Args:
478
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
479
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
480
+ it.
481
+
482
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
483
+ [`PreTrainedTokenizer.__call__`] for details.
484
+
485
+ [What are input IDs?](../glossary#input-ids)
486
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
487
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
488
+
489
+ - 1 for tokens that are **not masked**,
490
+ - 0 for tokens that are **masked**.
491
+
492
+ [What are attention masks?](../glossary#attention-mask)
493
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
494
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
495
+ config.max_position_embeddings - 1]`.
496
+
497
+ [What are position IDs?](../glossary#position-ids)
498
+ output_attentions (`bool`, *optional*):
499
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
500
+ tensors for more detail.
501
+ output_hidden_states (`bool`, *optional*):
502
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
503
+ more detail.
504
+ return_dict (`bool`, *optional*):
505
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
506
+ """
507
+
508
+ CLIP_VISION_INPUTS_DOCSTRING = r"""
509
+ Args:
510
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
511
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
512
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
513
+ output_attentions (`bool`, *optional*):
514
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
515
+ tensors for more detail.
516
+ output_hidden_states (`bool`, *optional*):
517
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
518
+ more detail.
519
+ return_dict (`bool`, *optional*):
520
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
521
+ """
522
+
523
+ CLIP_INPUTS_DOCSTRING = r"""
524
+ Args:
525
+ input_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`):
526
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
527
+ it.
528
+
529
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
530
+ [`PreTrainedTokenizer.__call__`] for details.
531
+
532
+ [What are input IDs?](../glossary#input-ids)
533
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
534
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
535
+
536
+ - 1 for tokens that are **not masked**,
537
+ - 0 for tokens that are **masked**.
538
+
539
+ [What are attention masks?](../glossary#attention-mask)
540
+ position_ids (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
541
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
542
+ config.max_position_embeddings - 1]`.
543
+
544
+ [What are position IDs?](../glossary#position-ids)
545
+ pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
546
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
547
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
548
+ return_loss (`bool`, *optional*):
549
+ Whether or not to return the contrastive loss.
550
+ output_attentions (`bool`, *optional*):
551
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
552
+ tensors for more detail.
553
+ output_hidden_states (`bool`, *optional*):
554
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
555
+ more detail.
556
+ return_dict (`bool`, *optional*):
557
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
558
+ """
559
+
560
+
561
+ class CLIPEncoder(nn.Module):
562
+ """
563
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
564
+ [`CLIPEncoderLayer`].
565
+
566
+ Args:
567
+ config: CLIPConfig
568
+ """
569
+
570
+ def __init__(self, config: CLIPConfig):
571
+ super().__init__()
572
+ self.config = config
573
+ self.layers = nn.ModuleList([CLIPEncoderLayer(config) for _ in range(config.num_hidden_layers)])
574
+ self.gradient_checkpointing = False
575
+
576
+ def forward(
577
+ self,
578
+ inputs_embeds,
579
+ attention_mask: Optional[torch.Tensor] = None,
580
+ causal_attention_mask: Optional[torch.Tensor] = None,
581
+ output_attentions: Optional[bool] = None,
582
+ output_hidden_states: Optional[bool] = None,
583
+ return_dict: Optional[bool] = None,
584
+ ) -> Union[Tuple, BaseModelOutput]:
585
+ r"""
586
+ Args:
587
+ inputs_embeds (`torch.FloatTensor` of shape `(batch_size, sequence_length, hidden_size)`):
588
+ Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation.
589
+ This is useful if you want more control over how to convert `input_ids` indices into associated vectors
590
+ than the model's internal embedding lookup matrix.
591
+ attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
592
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
593
+
594
+ - 1 for tokens that are **not masked**,
595
+ - 0 for tokens that are **masked**.
596
+
597
+ [What are attention masks?](../glossary#attention-mask)
598
+ causal_attention_mask (`torch.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
599
+ Causal mask for the text model. Mask values selected in `[0, 1]`:
600
+
601
+ - 1 for tokens that are **not masked**,
602
+ - 0 for tokens that are **masked**.
603
+
604
+ [What are attention masks?](../glossary#attention-mask)
605
+ output_attentions (`bool`, *optional*):
606
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under
607
+ returned tensors for more detail.
608
+ output_hidden_states (`bool`, *optional*):
609
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors
610
+ for more detail.
611
+ return_dict (`bool`, *optional*):
612
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
613
+ """
614
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
615
+ output_hidden_states = (
616
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
617
+ )
618
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
619
+
620
+ encoder_states = () if output_hidden_states else None
621
+ all_attentions = () if output_attentions else None
622
+
623
+ hidden_states = inputs_embeds
624
+ for idx, encoder_layer in enumerate(self.layers):
625
+ if output_hidden_states:
626
+ encoder_states = encoder_states + (hidden_states,)
627
+ if self.gradient_checkpointing and self.training:
628
+ layer_outputs = self._gradient_checkpointing_func(
629
+ encoder_layer.__call__,
630
+ hidden_states,
631
+ attention_mask,
632
+ causal_attention_mask,
633
+ output_attentions,
634
+ )
635
+ else:
636
+ layer_outputs = encoder_layer(
637
+ hidden_states,
638
+ attention_mask,
639
+ causal_attention_mask,
640
+ output_attentions=output_attentions,
641
+ )
642
+
643
+ hidden_states = layer_outputs[0]
644
+
645
+ if output_attentions:
646
+ all_attentions = all_attentions + (layer_outputs[1],)
647
+
648
+ if output_hidden_states:
649
+ encoder_states = encoder_states + (hidden_states,)
650
+
651
+ if not return_dict:
652
+ return tuple(v for v in [hidden_states, encoder_states, all_attentions] if v is not None)
653
+ return BaseModelOutput(
654
+ last_hidden_state=hidden_states, hidden_states=encoder_states, attentions=all_attentions
655
+ )
656
+
657
+
658
+ class CLIPTextTransformer(nn.Module):
659
+ def __init__(self, config: CLIPTextConfig):
660
+ super().__init__()
661
+ self.config = config
662
+ embed_dim = config.hidden_size
663
+ self.embeddings = CLIPTextEmbeddings(config)
664
+ self.encoder = CLIPEncoder(config)
665
+ self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
666
+
667
+ # For `pooled_output` computation
668
+ self.eos_token_id = config.eos_token_id
669
+
670
+ @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
671
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPTextConfig)
672
+ def forward(
673
+ self,
674
+ input_ids: Optional[torch.Tensor] = None,
675
+ attention_mask: Optional[torch.Tensor] = None,
676
+ position_ids: Optional[torch.Tensor] = None,
677
+ output_attentions: Optional[bool] = None,
678
+ output_hidden_states: Optional[bool] = None,
679
+ return_dict: Optional[bool] = None,
680
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
681
+ r"""
682
+ Returns:
683
+
684
+ """
685
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
686
+ output_hidden_states = (
687
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
688
+ )
689
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
690
+
691
+ if input_ids is None:
692
+ raise ValueError("You have to specify input_ids")
693
+
694
+ input_shape = input_ids.size()
695
+ input_ids = input_ids.view(-1, input_shape[-1])
696
+
697
+ hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
698
+
699
+ # CLIP's text model uses causal mask, prepare it here.
700
+ # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
701
+ causal_attention_mask = _create_4d_causal_attention_mask(
702
+ input_shape, hidden_states.dtype, device=hidden_states.device
703
+ )
704
+ # expand attention_mask
705
+ if attention_mask is not None:
706
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
707
+ attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)
708
+
709
+ encoder_outputs = self.encoder(
710
+ inputs_embeds=hidden_states,
711
+ attention_mask=attention_mask,
712
+ causal_attention_mask=causal_attention_mask,
713
+ output_attentions=output_attentions,
714
+ output_hidden_states=output_hidden_states,
715
+ return_dict=return_dict,
716
+ )
717
+
718
+ last_hidden_state = encoder_outputs[0]
719
+ last_hidden_state = self.final_layer_norm(last_hidden_state)
720
+
721
+ if self.eos_token_id == 2:
722
+ # The `eos_token_id` was incorrect before PR #24773: let's keep what has been done here.
723
+ # A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added
724
+ # ------------------------------------------------------------
725
+ # text_embeds.shape = [batch_size, sequence_length, transformer.width]
726
+ # take features from the eot embedding (eot_token is the highest number in each sequence)
727
+ # casting to torch.int for onnx compatibility: argmax doesn't support int64 inputs with opset 14
728
+ pooled_output = last_hidden_state[
729
+ torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
730
+ input_ids.to(dtype=torch.int, device=last_hidden_state.device).argmax(dim=-1),
731
+ ]
732
+ else:
733
+ # The config gets updated `eos_token_id` from PR #24773 (so the use of extra new tokens is possible)
734
+ pooled_output = last_hidden_state[
735
+ torch.arange(last_hidden_state.shape[0], device=last_hidden_state.device),
736
+ # We need to get the first position of the `eos_token_id` value (`pad_token_id` might be equal to `eos_token_id`)
737
+ (input_ids.to(dtype=torch.int, device=last_hidden_state.device) == self.eos_token_id)
738
+ .int()
739
+ .argmax(dim=-1),
740
+ ]
741
+
742
+ if not return_dict:
743
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
744
+
745
+ return BaseModelOutputWithPooling(
746
+ last_hidden_state=last_hidden_state,
747
+ pooler_output=pooled_output,
748
+ hidden_states=encoder_outputs.hidden_states,
749
+ attentions=encoder_outputs.attentions,
750
+ )
751
+
752
+
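A side note on the two pooling branches above: they differ only in how the EOS position is located. The sketch below (hypothetical token ids, not part of this file) shows why the legacy `argmax(input_ids)` heuristic breaks once tokens with ids larger than the original EOS id are added to the vocabulary, while the explicit `eos_token_id` comparison does not.

```python
# Illustration only (hypothetical ids, not part of modeling_clip.py).
import torch

eos_token_id = 49407                                   # EOS id in the original CLIP vocab
ids = torch.tensor([[49406, 320, 49408, 49407]])       # 49408 = a newly added token

legacy_pos = ids.to(torch.int).argmax(dim=-1)           # tensor([2]) -> picks the new token (wrong)
fixed_pos = (ids == eos_token_id).int().argmax(dim=-1)  # tensor([3]) -> first EOS position (correct)
```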
753
+ @add_start_docstrings(
754
+ """The text model from CLIP without any head or projection on top.""",
755
+ CLIP_START_DOCSTRING,
756
+ )
757
+ class CLIPTextModel(CLIPPreTrainedModel):
758
+ config_class = CLIPTextConfig
759
+
760
+ _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer"]
761
+
762
+ def __init__(self, config: CLIPTextConfig):
763
+ super().__init__(config)
764
+ self.text_model = CLIPTextTransformer(config)
765
+ # Initialize weights and apply final processing
766
+ self.post_init()
767
+
768
+ def get_input_embeddings(self) -> nn.Module:
769
+ return self.text_model.embeddings.token_embedding
770
+
771
+ def set_input_embeddings(self, value):
772
+ self.text_model.embeddings.token_embedding = value
773
+
774
+ @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
775
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPTextConfig)
776
+ def forward(
777
+ self,
778
+ input_ids: Optional[torch.Tensor] = None,
779
+ attention_mask: Optional[torch.Tensor] = None,
780
+ position_ids: Optional[torch.Tensor] = None,
781
+ output_attentions: Optional[bool] = None,
782
+ output_hidden_states: Optional[bool] = None,
783
+ return_dict: Optional[bool] = None,
784
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
785
+ r"""
786
+ Returns:
787
+
788
+ Examples:
789
+
790
+ ```python
791
+ >>> from transformers import AutoTokenizer, CLIPTextModel
792
+
793
+ >>> model = CLIPTextModel.from_pretrained("openai/clip-vit-base-patch32")
794
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")
795
+
796
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
797
+
798
+ >>> outputs = model(**inputs)
799
+ >>> last_hidden_state = outputs.last_hidden_state
800
+ >>> pooled_output = outputs.pooler_output # pooled (EOS token) states
801
+ ```"""
802
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
803
+
804
+ return self.text_model(
805
+ input_ids=input_ids,
806
+ attention_mask=attention_mask,
807
+ position_ids=position_ids,
808
+ output_attentions=output_attentions,
809
+ output_hidden_states=output_hidden_states,
810
+ return_dict=return_dict,
811
+ )
812
+
813
+
814
+ class CLIPVisionTransformer(nn.Module):
815
+ def __init__(self, config: CLIPVisionConfig):
816
+ super().__init__()
817
+ self.config = config
818
+ embed_dim = config.hidden_size
819
+
820
+ self.embeddings = CLIPVisionEmbeddings(config)
821
+ self.pre_layrnorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
822
+ self.encoder = CLIPEncoder(config)
823
+ self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
824
+
825
+ @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
826
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig)
827
+ def forward(
828
+ self,
829
+ pixel_values: Optional[torch.FloatTensor] = None,
830
+ output_attentions: Optional[bool] = None,
831
+ output_hidden_states: Optional[bool] = None,
832
+ return_dict: Optional[bool] = None,
833
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
834
+ r"""
835
+ Returns:
836
+
837
+ """
838
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
839
+ output_hidden_states = (
840
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
841
+ )
842
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
843
+
844
+ if pixel_values is None:
845
+ raise ValueError("You have to specify pixel_values")
846
+
847
+ hidden_states = self.embeddings(pixel_values)
848
+ hidden_states = self.pre_layrnorm(hidden_states)
849
+
850
+ encoder_outputs = self.encoder(
851
+ inputs_embeds=hidden_states,
852
+ output_attentions=output_attentions,
853
+ output_hidden_states=output_hidden_states,
854
+ return_dict=return_dict,
855
+ )
856
+
857
+ last_hidden_state = encoder_outputs[0]
858
+ pooled_output = last_hidden_state[:, 0, :]
859
+ pooled_output = self.post_layernorm(pooled_output)
860
+
861
+ if not return_dict:
862
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
863
+
864
+ return BaseModelOutputWithPooling(
865
+ last_hidden_state=last_hidden_state,
866
+ pooler_output=pooled_output,
867
+ hidden_states=encoder_outputs.hidden_states,
868
+ attentions=encoder_outputs.attentions,
869
+ )
870
+
871
+
872
+ @add_start_docstrings(
873
+ """The vision model from CLIP without any head or projection on top.""",
874
+ CLIP_START_DOCSTRING,
875
+ )
876
+ class CLIPVisionModel(CLIPPreTrainedModel):
877
+ config_class = CLIPVisionConfig
878
+ main_input_name = "pixel_values"
879
+ _no_split_modules = ["CLIPEncoderLayer"]
880
+
881
+ def __init__(self, config: CLIPVisionConfig):
882
+ super().__init__(config)
883
+ self.vision_model = CLIPVisionTransformer(config)
884
+ # Initialize weights and apply final processing
885
+ self.post_init()
886
+
887
+ def get_input_embeddings(self) -> nn.Module:
888
+ return self.vision_model.embeddings.patch_embedding
889
+
890
+ @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
891
+ @replace_return_docstrings(output_type=BaseModelOutputWithPooling, config_class=CLIPVisionConfig)
892
+ def forward(
893
+ self,
894
+ pixel_values: Optional[torch.FloatTensor] = None,
895
+ output_attentions: Optional[bool] = None,
896
+ output_hidden_states: Optional[bool] = None,
897
+ return_dict: Optional[bool] = None,
898
+ ) -> Union[Tuple, BaseModelOutputWithPooling]:
899
+ r"""
900
+ Returns:
901
+
902
+ Examples:
903
+
904
+ ```python
905
+ >>> from PIL import Image
906
+ >>> import requests
907
+ >>> from transformers import AutoProcessor, CLIPVisionModel
908
+
909
+ >>> model = CLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32")
910
+ >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
911
+
912
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
913
+ >>> image = Image.open(requests.get(url, stream=True).raw)
914
+
915
+ >>> inputs = processor(images=image, return_tensors="pt")
916
+
917
+ >>> outputs = model(**inputs)
918
+ >>> last_hidden_state = outputs.last_hidden_state
919
+ >>> pooled_output = outputs.pooler_output # pooled CLS states
920
+ ```"""
921
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
922
+
923
+ return self.vision_model(
924
+ pixel_values=pixel_values,
925
+ output_attentions=output_attentions,
926
+ output_hidden_states=output_hidden_states,
927
+ return_dict=return_dict,
928
+ )
929
+
930
+
931
+ @add_start_docstrings(CLIP_START_DOCSTRING)
932
+ class CLIPModel(CLIPPreTrainedModel):
933
+ config_class = CLIPConfig
934
+ _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer"]
935
+
936
+ def __init__(self, config: CLIPConfig):
937
+ super().__init__(config)
938
+
939
+ if not isinstance(config.text_config, CLIPTextConfig):
940
+ raise ValueError(
941
+ "config.text_config is expected to be of type CLIPTextConfig but is of type"
942
+ f" {type(config.text_config)}."
943
+ )
944
+
945
+ if not isinstance(config.vision_config, CLIPVisionConfig):
946
+ raise ValueError(
947
+ "config.vision_config is expected to be of type CLIPVisionConfig but is of type"
948
+ f" {type(config.vision_config)}."
949
+ )
950
+
951
+ text_config = config.text_config
952
+ vision_config = config.vision_config
953
+
954
+ self.projection_dim = config.projection_dim
955
+ self.text_embed_dim = text_config.hidden_size
956
+ self.vision_embed_dim = vision_config.hidden_size
957
+
958
+ self.text_model = CLIPTextTransformer(text_config)
959
+ self.vision_model = CLIPVisionTransformer(vision_config)
960
+
961
+ self.visual_projection = nn.Linear(self.vision_embed_dim, self.projection_dim, bias=False)
962
+ self.text_projection = nn.Linear(self.text_embed_dim, self.projection_dim, bias=False)
963
+ self.logit_scale = nn.Parameter(torch.tensor(self.config.logit_scale_init_value))
964
+
965
+ # Initialize weights and apply final processing
966
+ self.post_init()
967
+
968
+ @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
969
+ def get_text_features(
970
+ self,
971
+ input_ids: Optional[torch.Tensor] = None,
972
+ attention_mask: Optional[torch.Tensor] = None,
973
+ position_ids: Optional[torch.Tensor] = None,
974
+ output_attentions: Optional[bool] = None,
975
+ output_hidden_states: Optional[bool] = None,
976
+ return_dict: Optional[bool] = None,
977
+ ) -> torch.FloatTensor:
978
+ r"""
979
+ Returns:
980
+ text_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by
981
+ applying the projection layer to the pooled output of [`CLIPTextModel`].
982
+
983
+ Examples:
984
+
985
+ ```python
986
+ >>> from transformers import AutoTokenizer, CLIPModel
987
+
988
+ >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
989
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")
990
+
991
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
992
+ >>> text_features = model.get_text_features(**inputs)
993
+ ```"""
994
+ # Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
995
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
996
+ output_hidden_states = (
997
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
998
+ )
999
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1000
+
1001
+ text_outputs = self.text_model(
1002
+ input_ids=input_ids,
1003
+ attention_mask=attention_mask,
1004
+ position_ids=position_ids,
1005
+ output_attentions=output_attentions,
1006
+ output_hidden_states=output_hidden_states,
1007
+ return_dict=return_dict,
1008
+ )
1009
+
1010
+ pooled_output = text_outputs[1]
1011
+ text_features = self.text_projection(pooled_output)
1012
+
1013
+ return text_features
1014
+
1015
+ @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
1016
+ def get_image_features(
1017
+ self,
1018
+ pixel_values: Optional[torch.FloatTensor] = None,
1019
+ output_attentions: Optional[bool] = None,
1020
+ output_hidden_states: Optional[bool] = None,
1021
+ return_dict: Optional[bool] = None,
1022
+ ) -> torch.FloatTensor:
1023
+ r"""
1024
+ Returns:
1025
+ image_features (`torch.FloatTensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by
1026
+ applying the projection layer to the pooled output of [`CLIPVisionModel`].
1027
+
1028
+ Examples:
1029
+
1030
+ ```python
1031
+ >>> from PIL import Image
1032
+ >>> import requests
1033
+ >>> from transformers import AutoProcessor, CLIPModel
1034
+
1035
+ >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
1036
+ >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
1037
+
1038
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1039
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1040
+
1041
+ >>> inputs = processor(images=image, return_tensors="pt")
1042
+
1043
+ >>> image_features = model.get_image_features(**inputs)
1044
+ ```"""
1045
+ # Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
1046
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1047
+ output_hidden_states = (
1048
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1049
+ )
1050
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1051
+
1052
+ vision_outputs = self.vision_model(
1053
+ pixel_values=pixel_values,
1054
+ output_attentions=output_attentions,
1055
+ output_hidden_states=output_hidden_states,
1056
+ return_dict=return_dict,
1057
+ )
1058
+
1059
+ pooled_output = vision_outputs[1] # pooled_output
1060
+ image_features = self.visual_projection(pooled_output)
1061
+
1062
+ return image_features
1063
+
1064
+ @add_start_docstrings_to_model_forward(CLIP_INPUTS_DOCSTRING)
1065
+ @replace_return_docstrings(output_type=CLIPOutput, config_class=CLIPConfig)
1066
+ def forward(
1067
+ self,
1068
+ input_ids: Optional[torch.LongTensor] = None,
1069
+ pixel_values: Optional[torch.FloatTensor] = None,
1070
+ attention_mask: Optional[torch.Tensor] = None,
1071
+ position_ids: Optional[torch.LongTensor] = None,
1072
+ return_loss: Optional[bool] = None,
1073
+ output_attentions: Optional[bool] = None,
1074
+ output_hidden_states: Optional[bool] = None,
1075
+ return_dict: Optional[bool] = None,
1076
+ ) -> Union[Tuple, CLIPOutput]:
1077
+ r"""
1078
+ Returns:
1079
+
1080
+ Examples:
1081
+
1082
+ ```python
1083
+ >>> from PIL import Image
1084
+ >>> import requests
1085
+ >>> from transformers import AutoProcessor, CLIPModel
1086
+
1087
+ >>> model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
1088
+ >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
1089
+
1090
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1091
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1092
+
1093
+ >>> inputs = processor(
1094
+ ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="pt", padding=True
1095
+ ... )
1096
+
1097
+ >>> outputs = model(**inputs)
1098
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
1099
+ >>> probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
1100
+ ```"""
1101
+ # Use CLIP model's config for some fields (if specified) instead of those of vision & text components.
1102
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1103
+ output_hidden_states = (
1104
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1105
+ )
1106
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1107
+
1108
+ vision_outputs = self.vision_model(
1109
+ pixel_values=pixel_values,
1110
+ output_attentions=output_attentions,
1111
+ output_hidden_states=output_hidden_states,
1112
+ return_dict=return_dict,
1113
+ )
1114
+
1115
+ text_outputs = self.text_model(
1116
+ input_ids=input_ids,
1117
+ attention_mask=attention_mask,
1118
+ position_ids=position_ids,
1119
+ output_attentions=output_attentions,
1120
+ output_hidden_states=output_hidden_states,
1121
+ return_dict=return_dict,
1122
+ )
1123
+
1124
+ image_embeds = vision_outputs[1]
1125
+ image_embeds = self.visual_projection(image_embeds)
1126
+
1127
+ text_embeds = text_outputs[1]
1128
+ text_embeds = self.text_projection(text_embeds)
1129
+
1130
+ # normalized features
1131
+ image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
1132
+ text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)
1133
+
1134
+ # cosine similarity as logits
1135
+ logit_scale = self.logit_scale.exp()
1136
+ logits_per_text = torch.matmul(text_embeds, image_embeds.t()) * logit_scale
1137
+ logits_per_image = logits_per_text.t()
1138
+
1139
+ loss = None
1140
+ if return_loss:
1141
+ loss = clip_loss(logits_per_text)
1142
+
1143
+ if not return_dict:
1144
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
1145
+ return ((loss,) + output) if loss is not None else output
1146
+
1147
+ return CLIPOutput(
1148
+ loss=loss,
1149
+ logits_per_image=logits_per_image,
1150
+ logits_per_text=logits_per_text,
1151
+ text_embeds=text_embeds,
1152
+ image_embeds=image_embeds,
1153
+ text_model_output=text_outputs,
1154
+ vision_model_output=vision_outputs,
1155
+ )
1156
+
1157
+
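For reference, `clip_loss` is defined earlier in this file and is not shown in this hunk; the sketch below is a minimal restatement of the symmetric contrastive objective that `logits_per_text` is assumed to feed into (matching text/image pairs sit on the diagonal of the similarity matrix).

```python
# Minimal sketch of the symmetric contrastive objective (assumed formulation;
# the actual `clip_loss` helper is defined earlier in this file).
import torch
import torch.nn.functional as F

def symmetric_clip_loss(logits_per_text: torch.Tensor) -> torch.Tensor:
    targets = torch.arange(logits_per_text.shape[0], device=logits_per_text.device)
    caption_loss = F.cross_entropy(logits_per_text, targets)    # text -> image direction
    image_loss = F.cross_entropy(logits_per_text.t(), targets)  # image -> text direction
    return (caption_loss + image_loss) / 2.0
```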
1158
+ @add_start_docstrings(
1159
+ """
1160
+ CLIP Text Model with a projection layer on top (a linear layer on top of the pooled output).
1161
+ """,
1162
+ CLIP_START_DOCSTRING,
1163
+ )
1164
+ class CLIPTextModelWithProjection(CLIPPreTrainedModel):
1165
+ config_class = CLIPTextConfig
1166
+
1167
+ _no_split_modules = ["CLIPTextEmbeddings", "CLIPEncoderLayer"]
1168
+
1169
+ def __init__(self, config: CLIPTextConfig):
1170
+ super().__init__(config)
1171
+
1172
+ self.text_model = CLIPTextTransformer(config)
1173
+
1174
+ self.text_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
1175
+
1176
+ # Initialize weights and apply final processing
1177
+ self.post_init()
1178
+
1179
+ def get_input_embeddings(self) -> nn.Module:
1180
+ return self.text_model.embeddings.token_embedding
1181
+
1182
+ def set_input_embeddings(self, value):
1183
+ self.text_model.embeddings.token_embedding = value
1184
+
1185
+ @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING)
1186
+ @replace_return_docstrings(output_type=CLIPTextModelOutput, config_class=CLIPTextConfig)
1187
+ def forward(
1188
+ self,
1189
+ input_ids: Optional[torch.Tensor] = None,
1190
+ attention_mask: Optional[torch.Tensor] = None,
1191
+ position_ids: Optional[torch.Tensor] = None,
1192
+ output_attentions: Optional[bool] = None,
1193
+ output_hidden_states: Optional[bool] = None,
1194
+ return_dict: Optional[bool] = None,
1195
+ ) -> Union[Tuple, CLIPTextModelOutput]:
1196
+ r"""
1197
+ Returns:
1198
+
1199
+ Examples:
1200
+
1201
+ ```python
1202
+ >>> from transformers import AutoTokenizer, CLIPTextModelWithProjection
1203
+
1204
+ >>> model = CLIPTextModelWithProjection.from_pretrained("openai/clip-vit-base-patch32")
1205
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")
1206
+
1207
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="pt")
1208
+
1209
+ >>> outputs = model(**inputs)
1210
+ >>> text_embeds = outputs.text_embeds
1211
+ ```"""
1212
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1213
+
1214
+ text_outputs = self.text_model(
1215
+ input_ids=input_ids,
1216
+ attention_mask=attention_mask,
1217
+ position_ids=position_ids,
1218
+ output_attentions=output_attentions,
1219
+ output_hidden_states=output_hidden_states,
1220
+ return_dict=return_dict,
1221
+ )
1222
+
1223
+ pooled_output = text_outputs[1]
1224
+
1225
+ text_embeds = self.text_projection(pooled_output)
1226
+
1227
+ if not return_dict:
1228
+ outputs = (text_embeds, text_outputs[0]) + text_outputs[2:]
1229
+ return tuple(output for output in outputs if output is not None)
1230
+
1231
+ return CLIPTextModelOutput(
1232
+ text_embeds=text_embeds,
1233
+ last_hidden_state=text_outputs.last_hidden_state,
1234
+ hidden_states=text_outputs.hidden_states,
1235
+ attentions=text_outputs.attentions,
1236
+ )
1237
+
1238
+
1239
+ @add_start_docstrings(
1240
+ """
1241
+ CLIP Vision Model with a projection layer on top (a linear layer on top of the pooled output).
1242
+ """,
1243
+ CLIP_START_DOCSTRING,
1244
+ )
1245
+ class CLIPVisionModelWithProjection(CLIPPreTrainedModel):
1246
+ config_class = CLIPVisionConfig
1247
+ main_input_name = "pixel_values"
1248
+
1249
+ def __init__(self, config: CLIPVisionConfig):
1250
+ super().__init__(config)
1251
+
1252
+ self.vision_model = CLIPVisionTransformer(config)
1253
+
1254
+ self.visual_projection = nn.Linear(config.hidden_size, config.projection_dim, bias=False)
1255
+
1256
+ # Initialize weights and apply final processing
1257
+ self.post_init()
1258
+
1259
+ def get_input_embeddings(self) -> nn.Module:
1260
+ return self.vision_model.embeddings.patch_embedding
1261
+
1262
+ @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
1263
+ @replace_return_docstrings(output_type=CLIPVisionModelOutput, config_class=CLIPVisionConfig)
1264
+ def forward(
1265
+ self,
1266
+ pixel_values: Optional[torch.FloatTensor] = None,
1267
+ output_attentions: Optional[bool] = None,
1268
+ output_hidden_states: Optional[bool] = None,
1269
+ return_dict: Optional[bool] = None,
1270
+ ) -> Union[Tuple, CLIPVisionModelOutput]:
1271
+ r"""
1272
+ Returns:
1273
+
1274
+ Examples:
1275
+
1276
+ ```python
1277
+ >>> from PIL import Image
1278
+ >>> import requests
1279
+ >>> from transformers import AutoProcessor, CLIPVisionModelWithProjection
1280
+
1281
+ >>> model = CLIPVisionModelWithProjection.from_pretrained("openai/clip-vit-base-patch32")
1282
+ >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
1283
+
1284
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1285
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1286
+
1287
+ >>> inputs = processor(images=image, return_tensors="pt")
1288
+
1289
+ >>> outputs = model(**inputs)
1290
+ >>> image_embeds = outputs.image_embeds
1291
+ ```"""
1292
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1293
+
1294
+ vision_outputs = self.vision_model(
1295
+ pixel_values=pixel_values,
1296
+ output_attentions=output_attentions,
1297
+ output_hidden_states=output_hidden_states,
1298
+ return_dict=return_dict,
1299
+ )
1300
+
1301
+ pooled_output = vision_outputs[1] # pooled_output
1302
+
1303
+ image_embeds = self.visual_projection(pooled_output)
1304
+
1305
+ if not return_dict:
1306
+ outputs = (image_embeds, vision_outputs[0]) + vision_outputs[2:]
1307
+ return tuple(output for output in outputs if output is not None)
1308
+
1309
+ return CLIPVisionModelOutput(
1310
+ image_embeds=image_embeds,
1311
+ last_hidden_state=vision_outputs.last_hidden_state,
1312
+ hidden_states=vision_outputs.hidden_states,
1313
+ attentions=vision_outputs.attentions,
1314
+ )
1315
+
1316
+
1317
+ @add_start_docstrings(
1318
+ """
1319
+ CLIP vision encoder with an image classification head on top (a linear layer on top of the pooled final hidden states of
1320
+ the patch tokens), e.g. for ImageNet.
1321
+ """,
1322
+ CLIP_START_DOCSTRING,
1323
+ )
1324
+ class CLIPForImageClassification(CLIPPreTrainedModel):
1325
+ main_input_name = "pixel_values"
1326
+
1327
+ def __init__(self, config: CLIPConfig) -> None:
1328
+ super().__init__(config)
1329
+
1330
+ self.num_labels = config.num_labels
1331
+ self.vision_model = CLIPVisionTransformer(config.vision_config)
1332
+
1333
+ # Classifier head
1334
+ self.classifier = (
1335
+ nn.Linear(config.vision_config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
1336
+ )
1337
+
1338
+ # Initialize weights and apply final processing
1339
+ self.post_init()
1340
+
1341
+ @add_start_docstrings_to_model_forward(CLIP_INPUTS_DOCSTRING)
1342
+ @add_code_sample_docstrings(
1343
+ checkpoint=_IMAGE_CLASS_CHECKPOINT,
1344
+ output_type=ImageClassifierOutput,
1345
+ config_class=_CONFIG_FOR_DOC,
1346
+ expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT,
1347
+ )
1348
+ def forward(
1349
+ self,
1350
+ pixel_values: Optional[torch.Tensor] = None,
1351
+ labels: Optional[torch.Tensor] = None,
1352
+ output_attentions: Optional[bool] = None,
1353
+ output_hidden_states: Optional[bool] = None,
1354
+ return_dict: Optional[bool] = None,
1355
+ ) -> Union[tuple, ImageClassifierOutput]:
1356
+ r"""
1357
+ labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
1358
+ Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
1359
+ config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
1360
+ `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
1361
+ """
1362
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
1363
+ output_hidden_states = (
1364
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
1365
+ )
1366
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
1367
+
1368
+ outputs = self.vision_model(
1369
+ pixel_values,
1370
+ output_attentions=output_attentions,
1371
+ output_hidden_states=output_hidden_states,
1372
+ return_dict=return_dict,
1373
+ )
1374
+
1375
+ sequence_output = outputs[0]
1376
+
1377
+ # average pool the patch tokens
1378
+ sequence_output = torch.mean(sequence_output[:, 1:, :], dim=1)
1379
+ # apply classifier
1380
+ logits = self.classifier(sequence_output)
1381
+
1382
+ loss = None
1383
+ if labels is not None:
1384
+ # move labels to correct device to enable model parallelism
1385
+ labels = labels.to(logits.device)
1386
+ if self.config.problem_type is None:
1387
+ if self.num_labels == 1:
1388
+ self.config.problem_type = "regression"
1389
+ elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
1390
+ self.config.problem_type = "single_label_classification"
1391
+ else:
1392
+ self.config.problem_type = "multi_label_classification"
1393
+
1394
+ if self.config.problem_type == "regression":
1395
+ loss_fct = MSELoss()
1396
+ if self.num_labels == 1:
1397
+ loss = loss_fct(logits.squeeze(), labels.squeeze())
1398
+ else:
1399
+ loss = loss_fct(logits, labels)
1400
+ elif self.config.problem_type == "single_label_classification":
1401
+ loss_fct = CrossEntropyLoss()
1402
+ loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))
1403
+ elif self.config.problem_type == "multi_label_classification":
1404
+ loss_fct = BCEWithLogitsLoss()
1405
+ loss = loss_fct(logits, labels)
1406
+
1407
+ if not return_dict:
1408
+ output = (logits,) + outputs[2:]
1409
+ return ((loss,) + output) if loss is not None else output
1410
+
1411
+ return ImageClassifierOutput(
1412
+ loss=loss,
1413
+ logits=logits,
1414
+ hidden_states=outputs.hidden_states,
1415
+ attentions=outputs.attentions,
1416
+ )
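A short usage sketch for `CLIPForImageClassification` above. The checkpoint name mirrors the examples elsewhere in this file; `num_labels=10` and the label value are placeholders, and the classifier head of the base checkpoint is newly initialized rather than trained.

```python
# Usage sketch (placeholder num_labels and label; the classifier head is randomly initialized).
from PIL import Image
import requests
import torch
from transformers import AutoImageProcessor, CLIPForImageClassification

model = CLIPForImageClassification.from_pretrained("openai/clip-vit-base-patch32", num_labels=10)
processor = AutoImageProcessor.from_pretrained("openai/clip-vit-base-patch32")

url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

inputs = processor(images=image, return_tensors="pt")
outputs = model(**inputs, labels=torch.tensor([3]))   # patch tokens are mean-pooled, then classified
loss, logits = outputs.loss, outputs.logits
```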
venv/lib/python3.10/site-packages/transformers/models/clip/modeling_flax_clip.py ADDED
@@ -0,0 +1,1295 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The OpenAI Team Authors, The Google Flax Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from typing import Any, Optional, Tuple, Union
17
+
18
+ import flax
19
+ import flax.linen as nn
20
+ import jax
21
+ import jax.numpy as jnp
22
+ from flax.core.frozen_dict import FrozenDict, freeze, unfreeze
23
+ from flax.linen import combine_masks, make_causal_mask
24
+ from flax.linen.attention import dot_product_attention_weights
25
+ from flax.traverse_util import flatten_dict, unflatten_dict
26
+ from jax import lax
27
+
28
+ from ...modeling_flax_outputs import FlaxBaseModelOutput, FlaxBaseModelOutputWithPooling
29
+ from ...modeling_flax_utils import (
30
+ ACT2FN,
31
+ FlaxPreTrainedModel,
32
+ append_replace_return_docstrings,
33
+ overwrite_call_docstring,
34
+ )
35
+ from ...utils import ModelOutput, add_start_docstrings, logging
36
+ from .configuration_clip import CLIPConfig, CLIPTextConfig, CLIPVisionConfig
37
+
38
+
39
+ logger = logging.get_logger(__name__)
40
+
41
+ CLIP_START_DOCSTRING = r"""
42
+
43
+ This model inherits from [`FlaxPreTrainedModel`]. Check the superclass documentation for the generic methods the
44
+ library implements for all its model (such as downloading, saving and converting weights from PyTorch models)
45
+
46
+ This model is also a
47
+ [flax.linen.Module](https://flax.readthedocs.io/en/latest/api_reference/flax.linen/module.html) subclass. Use it as
48
+ a regular Flax linen Module and refer to the Flax documentation for all matter related to general usage and
49
+ behavior.
50
+
51
+ Finally, this model supports inherent JAX features such as:
52
+
53
+ - [Just-In-Time (JIT) compilation](https://jax.readthedocs.io/en/latest/jax.html#just-in-time-compilation-jit)
54
+ - [Automatic Differentiation](https://jax.readthedocs.io/en/latest/jax.html#automatic-differentiation)
55
+ - [Vectorization](https://jax.readthedocs.io/en/latest/jax.html#vectorization-vmap)
56
+ - [Parallelization](https://jax.readthedocs.io/en/latest/jax.html#parallelization-pmap)
57
+
58
+ Parameters:
59
+ config ([`CLIPConfig`]): Model configuration class with all the parameters of the model.
60
+ Initializing with a config file does not load the weights associated with the model, only the
61
+ configuration. Check out the [`~FlaxPreTrainedModel.from_pretrained`] method to load the model weights.
62
+ dtype (`jax.numpy.dtype`, *optional*, defaults to `jax.numpy.float32`):
63
+ The data type of the computation. Can be one of `jax.numpy.float32`, `jax.numpy.float16` (on GPUs) and
64
+ `jax.numpy.bfloat16` (on TPUs).
65
+
66
+ This can be used to enable mixed-precision training or half-precision inference on GPUs or TPUs. If
67
+ specified, all the computation will be performed with the given `dtype`.
68
+
69
+ **Note that this only specifies the dtype of the computation and does not influence the dtype of model
70
+ parameters.**
71
+
72
+ If you wish to change the dtype of the model parameters, see [`~FlaxPreTrainedModel.to_fp16`] and
73
+ [`~FlaxPreTrainedModel.to_bf16`].
74
+ """
75
+
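A brief sketch of the `dtype` behaviour described above, using `FlaxCLIPModel` (defined later in this file) and the checkpoint name used in the other examples; `dtype` only changes the computation dtype, so casting the stored parameters is a separate, optional step.

```python
# Sketch: bfloat16 computation with the Flax CLIP model; parameters stay float32
# unless explicitly cast with `to_bf16`.
import jax.numpy as jnp
from transformers import FlaxCLIPModel

model = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32", dtype=jnp.bfloat16)
model.params = model.to_bf16(model.params)   # optional: also cast the stored parameters
```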
76
+ CLIP_TEXT_INPUTS_DOCSTRING = r"""
77
+ Args:
78
+ input_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`):
79
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
80
+ it.
81
+
82
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
83
+ [`PreTrainedTokenizer.__call__`] for details.
84
+
85
+ [What are input IDs?](../glossary#input-ids)
86
+ attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
87
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
88
+
89
+ - 1 for tokens that are **not masked**,
90
+ - 0 for tokens that are **masked**.
91
+
92
+ [What are attention masks?](../glossary#attention-mask)
93
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
94
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
95
+ config.max_position_embeddings - 1]`.
96
+
97
+ [What are position IDs?](../glossary#position-ids)
98
+ output_attentions (`bool`, *optional*):
99
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
100
+ tensors for more detail.
101
+ output_hidden_states (`bool`, *optional*):
102
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
103
+ more detail.
104
+ return_dict (`bool`, *optional*):
105
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
106
+ """
107
+
108
+ CLIP_VISION_INPUTS_DOCSTRING = r"""
109
+ Args:
110
+ pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`):
111
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
112
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
113
+ output_attentions (`bool`, *optional*):
114
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
115
+ tensors for more detail.
116
+ output_hidden_states (`bool`, *optional*):
117
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
118
+ more detail.
119
+ return_dict (`bool`, *optional*):
120
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
121
+ """
122
+
123
+ CLIP_INPUTS_DOCSTRING = r"""
124
+ Args:
125
+ input_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`):
126
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you provide
127
+ it.
128
+
129
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
130
+ [`PreTrainedTokenizer.__call__`] for details.
131
+
132
+ [What are input IDs?](../glossary#input-ids)
133
+ attention_mask (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
134
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
135
+
136
+ - 1 for tokens that are **not masked**,
137
+ - 0 for tokens that are **masked**.
138
+
139
+ [What are attention masks?](../glossary#attention-mask)
140
+ position_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`, *optional*):
141
+ Indices of positions of each input sequence token in the position embeddings. Selected in the range `[0,
142
+ config.max_position_embeddings - 1]`.
143
+
144
+ [What are position IDs?](../glossary#position-ids)
145
+ pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`):
146
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained using
147
+ [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
148
+ output_attentions (`bool`, *optional*):
149
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
150
+ tensors for more detail.
151
+ output_hidden_states (`bool`, *optional*):
152
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
153
+ more detail.
154
+ return_dict (`bool`, *optional*):
155
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple.
156
+ """
157
+
158
+
159
+ @flax.struct.dataclass
160
+ class FlaxCLIPTextModelOutput(ModelOutput):
161
+ """
162
+ Base class for text model's outputs that also contains a pooling of the last hidden states.
163
+
164
+ Args:
165
+ text_embeds (`jnp.ndarray` of shape `(batch_size, output_dim)`):
166
+ The text embeddings obtained by applying the projection layer to the pooled output of
167
+ [`FlaxCLIPTextModel`].
168
+ last_hidden_state (`jnp.ndarray` of shape `(batch_size, sequence_length, hidden_size)`):
169
+ Sequence of hidden-states at the output of the last layer of the model.
170
+ hidden_states (`tuple(jnp.ndarray)`, *optional*, returned when `output_hidden_states=True` is passed or when `config.output_hidden_states=True`):
171
+ Tuple of `jnp.ndarray` (one for the output of the embeddings + one for the output of each layer) of shape
172
+ `(batch_size, sequence_length, hidden_size)`.
173
+
174
+ Hidden-states of the model at the output of each layer plus the initial embedding outputs.
175
+ attentions (`tuple(jnp.ndarray)`, *optional*, returned when `output_attentions=True` is passed or when `config.output_attentions=True`):
176
+ Tuple of `jnp.ndarray` (one for each layer) of shape `(batch_size, num_heads, sequence_length,
177
+ sequence_length)`.
178
+
179
+ Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
180
+ heads.
181
+ """
182
+
183
+ text_embeds: jnp.ndarray = None
184
+ last_hidden_state: jnp.ndarray = None
185
+ hidden_states: Optional[Tuple[jnp.ndarray, ...]] = None
186
+ attentions: Optional[Tuple[jnp.ndarray, ...]] = None
187
+
188
+
189
+ @flax.struct.dataclass
190
+ class FlaxCLIPOutput(ModelOutput):
191
+ """
192
+ Args:
193
+ logits_per_image (`jnp.ndarray` of shape `(image_batch_size, text_batch_size)`):
194
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
195
+ similarity scores.
196
+ logits_per_text (`jnp.ndarray` of shape `(text_batch_size, image_batch_size)`):
197
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
198
+ similarity scores.
199
+ text_embeds (`jnp.ndarray` of shape `(batch_size, output_dim)`):
200
+ The text embeddings obtained by applying the projection layer to the pooled output of
201
+ [`FlaxCLIPTextModel`].
202
+ image_embeds (`jnp.ndarray` of shape `(batch_size, output_dim)`):
203
+ The image embeddings obtained by applying the projection layer to the pooled output of
204
+ [`FlaxCLIPVisionModel`].
205
+ text_model_output (`FlaxBaseModelOutputWithPooling`):
206
+ The output of the [`FlaxCLIPTextModel`].
207
+ vision_model_output(`FlaxBaseModelOutputWithPooling`):
208
+ The output of the [`FlaxCLIPVisionModel`].
209
+ """
210
+
211
+ logits_per_image: jnp.ndarray = None
212
+ logits_per_text: jnp.ndarray = None
213
+ text_embeds: jnp.ndarray = None
214
+ image_embeds: jnp.ndarray = None
215
+ text_model_output: FlaxBaseModelOutputWithPooling = None
216
+ vision_model_output: FlaxBaseModelOutputWithPooling = None
217
+
218
+ def to_tuple(self) -> Tuple[Any]:
219
+ return tuple(
220
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
221
+ for k in self.keys()
222
+ )
223
+
224
+
225
+ class FlaxCLIPVisionEmbeddings(nn.Module):
226
+ config: CLIPVisionConfig
227
+ dtype: jnp.dtype = jnp.float32
228
+
229
+ def setup(self):
230
+ embed_dim = self.config.hidden_size
231
+ image_size = self.config.image_size
232
+ patch_size = self.config.patch_size
233
+
234
+ self.class_embedding = self.param("class_embedding", jax.nn.initializers.normal(stddev=0.02), (embed_dim,))
235
+
236
+ self.patch_embedding = nn.Conv(
237
+ embed_dim,
238
+ kernel_size=(patch_size, patch_size),
239
+ strides=(patch_size, patch_size),
240
+ padding="VALID",
241
+ use_bias=False,
242
+ dtype=self.dtype,
243
+ kernel_init=jax.nn.initializers.normal(),
244
+ )
245
+
246
+ self.num_patches = (image_size // patch_size) ** 2
247
+ num_positions = self.num_patches + 1
248
+ self.position_embedding = nn.Embed(num_positions, embed_dim, embedding_init=jax.nn.initializers.normal())
249
+ self.position_ids = jnp.expand_dims(jnp.arange(0, num_positions, dtype="i4"), axis=0)
250
+
251
+ def __call__(self, pixel_values):
252
+ patch_embeds = self.patch_embedding(pixel_values)
253
+ batch_size, height, width, channels = patch_embeds.shape
254
+ patch_embeds = jnp.reshape(patch_embeds, (batch_size, height * width, channels))
255
+
256
+ class_embeds = jnp.expand_dims(self.class_embedding, axis=(0, 1))
257
+ class_embeds = jnp.tile(class_embeds, (batch_size, 1, 1))
258
+ embeddings = jnp.concatenate([class_embeds, patch_embeds], axis=1)
259
+ embeddings = embeddings + self.position_embedding(self.position_ids)
260
+ return embeddings
261
+
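For concreteness, the shape bookkeeping in `FlaxCLIPVisionEmbeddings.__call__` above, assuming the ViT-B/32 sizes (`image_size=224`, `patch_size=32`, `hidden_size=768`) and the channels-last `pixel_values` layout that the Flax model feeds in:

```python
# Shape walk-through (assumed ViT-B/32 configuration):
# pixel_values   : (batch, 224, 224, 3)   channels-last input to nn.Conv
# patch_embeds   : (batch, 7, 7, 768)     224 // 32 = 7 patches per side
# after reshape  : (batch, 49, 768)       num_patches = (224 // 32) ** 2 = 49
# class_embeds   : (batch, 1, 768)
# embeddings     : (batch, 50, 768)       class token + 49 patch tokens, plus position embeddings
```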
262
+
263
+ class FlaxCLIPTextEmbeddings(nn.Module):
264
+ config: CLIPTextConfig
265
+ dtype: jnp.dtype = jnp.float32
266
+
267
+ def setup(self):
268
+ embed_dim = self.config.hidden_size
269
+
270
+ self.token_embedding = nn.Embed(self.config.vocab_size, embed_dim, embedding_init=jax.nn.initializers.normal())
271
+ self.position_embedding = nn.Embed(
272
+ self.config.max_position_embeddings, embed_dim, embedding_init=jax.nn.initializers.normal()
273
+ )
274
+ self.position_ids = jnp.expand_dims(
275
+ jnp.arange(0, self.config.max_position_embeddings, dtype="i4"), axis=(0, 1)
276
+ )
277
+
278
+ def __call__(self, input_ids, position_ids):
279
+ input_embeds = self.token_embedding(input_ids.astype("i4"))
280
+ position_embeds = self.position_embedding(position_ids.astype("i4"))
281
+
282
+ embeddings = input_embeds + position_embeds
283
+ return embeddings
284
+
285
+
286
+ class FlaxCLIPAttention(nn.Module):
287
+ config: Union[CLIPTextConfig, CLIPVisionConfig]
288
+ dtype: jnp.dtype = jnp.float32
289
+
290
+ def setup(self):
291
+ self.embed_dim = self.config.hidden_size
292
+ self.num_heads = self.config.num_attention_heads
293
+ self.head_dim = self.embed_dim // self.num_heads
294
+ if self.head_dim * self.num_heads != self.embed_dim:
295
+ raise ValueError(
296
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
297
+ f" {self.num_heads})."
298
+ )
299
+ self.scale = self.head_dim**-0.5
300
+ self.dropout = self.config.attention_dropout
301
+
302
+ self.k_proj = nn.Dense(self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.01))
303
+ self.v_proj = nn.Dense(self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.01))
304
+ self.q_proj = nn.Dense(self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.01))
305
+ self.out_proj = nn.Dense(self.embed_dim, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.01))
306
+
307
+ self.causal = isinstance(self.config, CLIPTextConfig)
308
+ if self.causal:
309
+ self.causal_mask = make_causal_mask(jnp.ones((1, self.config.max_position_embeddings), dtype="i4"))
310
+
311
+ def _split_heads(self, hidden_states):
312
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.num_heads, self.head_dim))
313
+
314
+ def _merge_heads(self, hidden_states):
315
+ return hidden_states.reshape(hidden_states.shape[:2] + (self.embed_dim,))
316
+
317
+ def __call__(
318
+ self,
319
+ hidden_states,
320
+ attention_mask=None,
321
+ deterministic: bool = True,
322
+ output_attentions: bool = False,
323
+ ):
324
+ query = self.q_proj(hidden_states)
325
+ key = self.k_proj(hidden_states)
326
+ value = self.v_proj(hidden_states)
327
+
328
+ query = self._split_heads(query)
329
+ key = self._split_heads(key)
330
+ value = self._split_heads(value)
331
+
332
+ causal_attention_mask = None
333
+ if self.causal:
334
+ query_length, key_length = query.shape[1], key.shape[1]
335
+ causal_attention_mask = self.causal_mask[:, :, key_length - query_length : key_length, :key_length]
336
+
337
+ if attention_mask is not None and causal_attention_mask is not None:
338
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
339
+ attention_mask = combine_masks(attention_mask, causal_attention_mask, dtype="i4")
340
+ elif causal_attention_mask is not None:
341
+ attention_mask = causal_attention_mask
342
+ elif attention_mask is not None:
343
+ attention_mask = jnp.expand_dims(attention_mask, axis=(-3, -2))
344
+
345
+ if attention_mask is not None:
346
+ attention_bias = lax.select(
347
+ attention_mask > 0,
348
+ jnp.full(attention_mask.shape, 0.0).astype(self.dtype),
349
+ jnp.full(attention_mask.shape, jnp.finfo(self.dtype).min).astype(self.dtype),
350
+ )
351
+ else:
352
+ attention_bias = None
353
+
354
+ dropout_rng = None
355
+ if not deterministic and self.dropout > 0.0:
356
+ dropout_rng = self.make_rng("dropout")
357
+
358
+ attn_weights = dot_product_attention_weights(
359
+ query,
360
+ key,
361
+ bias=attention_bias,
362
+ dropout_rng=dropout_rng,
363
+ dropout_rate=self.dropout,
364
+ deterministic=deterministic,
365
+ dtype=self.dtype,
366
+ precision=None,
367
+ )
368
+
369
+ attn_output = jnp.einsum("...hqk,...khd->...qhd", attn_weights, value)
370
+ attn_output = self._merge_heads(attn_output)
371
+ attn_output = self.out_proj(attn_output)
372
+
373
+ outputs = (attn_output, attn_weights) if output_attentions else (attn_output,)
374
+ return outputs
375
+
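The `lax.select` call above turns the 0/1 attention mask into an additive bias for `dot_product_attention_weights`; a toy illustration:

```python
# Toy example of the mask -> additive-bias conversion used in FlaxCLIPAttention.
import jax.numpy as jnp
from jax import lax

mask = jnp.array([[1, 1, 0]])                                  # 1 = attend, 0 = masked
bias = lax.select(
    mask > 0,
    jnp.full(mask.shape, 0.0, dtype=jnp.float32),
    jnp.full(mask.shape, jnp.finfo(jnp.float32).min, dtype=jnp.float32),
)
# bias == [[0., 0., -3.4e38]]; adding it to the attention scores before the softmax
# drives the masked position's weight to ~0.
```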
376
+
377
+ class FlaxCLIPMLP(nn.Module):
378
+ config: Union[CLIPTextConfig, CLIPVisionConfig]
379
+ dtype: jnp.dtype = jnp.float32
380
+
381
+ def setup(self):
382
+ self.activation_fn = ACT2FN[self.config.hidden_act]
383
+ self.fc1 = nn.Dense(
384
+ self.config.intermediate_size,
385
+ dtype=self.dtype,
386
+ kernel_init=jax.nn.initializers.normal(0.01),
387
+ )
388
+ self.fc2 = nn.Dense(self.config.hidden_size, dtype=self.dtype, kernel_init=jax.nn.initializers.normal(0.01))
389
+
390
+ def __call__(self, hidden_states):
391
+ hidden_states = self.fc1(hidden_states)
392
+ hidden_states = self.activation_fn(hidden_states)
393
+ hidden_states = self.fc2(hidden_states)
394
+ return hidden_states
395
+
396
+
397
+ class FlaxCLIPEncoderLayer(nn.Module):
398
+ config: Union[CLIPTextConfig, CLIPVisionConfig]
399
+ dtype: jnp.dtype = jnp.float32
400
+
401
+ def setup(self):
402
+ self.self_attn = FlaxCLIPAttention(self.config, dtype=self.dtype)
403
+ self.layer_norm1 = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
404
+ self.mlp = FlaxCLIPMLP(self.config, dtype=self.dtype)
405
+ self.layer_norm2 = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
406
+
407
+ def __call__(
408
+ self,
409
+ hidden_states,
410
+ attention_mask,
411
+ deterministic: bool = True,
412
+ output_attentions: bool = False,
413
+ ):
414
+ residual = hidden_states
415
+
416
+ hidden_states = self.layer_norm1(hidden_states)
417
+ attn_outputs = self.self_attn(
418
+ hidden_states=hidden_states,
419
+ attention_mask=attention_mask,
420
+ deterministic=deterministic,
421
+ output_attentions=output_attentions,
422
+ )
423
+ hidden_states = attn_outputs[0]
424
+ hidden_states = residual + hidden_states
425
+
426
+ residual = hidden_states
427
+ hidden_states = self.layer_norm2(hidden_states)
428
+ hidden_states = self.mlp(hidden_states)
429
+ hidden_states = residual + hidden_states
430
+
431
+ outputs = (hidden_states,)
432
+
433
+ if output_attentions:
434
+ outputs += attn_outputs[1:]
435
+
436
+ return outputs
437
+
438
+
439
+ class FlaxCLIPLayerCollection(nn.Module):
440
+ config: Union[CLIPTextConfig, CLIPVisionConfig]
441
+ dtype: jnp.dtype = jnp.float32
442
+
443
+ def setup(self):
444
+ self.layers = [
445
+ FlaxCLIPEncoderLayer(self.config, name=str(i), dtype=self.dtype)
446
+ for i in range(self.config.num_hidden_layers)
447
+ ]
448
+
449
+ def __call__(
450
+ self,
451
+ hidden_states,
452
+ attention_mask=None,
453
+ deterministic: bool = True,
454
+ output_attentions: bool = False,
455
+ output_hidden_states: bool = False,
456
+ return_dict: bool = True,
457
+ ):
458
+ all_attentions = () if output_attentions else None
459
+ all_hidden_states = () if output_hidden_states else None
460
+
461
+ for layer in self.layers:
462
+ if output_hidden_states:
463
+ all_hidden_states += (hidden_states,)
464
+
465
+ layer_outputs = layer(
466
+ hidden_states, attention_mask, deterministic=deterministic, output_attentions=output_attentions
467
+ )
468
+ hidden_states = layer_outputs[0]
469
+
470
+ if output_attentions:
471
+ all_attentions += (layer_outputs[1],)
472
+
473
+ if output_hidden_states:
474
+ all_hidden_states += (hidden_states,)
475
+
476
+ outputs = (hidden_states,)
477
+
478
+ if not return_dict:
479
+ return tuple(v for v in outputs if v is not None)
480
+
481
+ return FlaxBaseModelOutput(
482
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
483
+ )
484
+
485
+
486
+ class FlaxCLIPEncoder(nn.Module):
487
+ config: Union[CLIPTextConfig, CLIPVisionConfig]
488
+ dtype: jnp.dtype = jnp.float32
489
+
490
+ def setup(self):
491
+ self.layers = FlaxCLIPLayerCollection(self.config, dtype=self.dtype)
492
+
493
+ def __call__(
494
+ self,
495
+ inputs_embeds,
496
+ attention_mask=None,
497
+ deterministic: bool = True,
498
+ output_attentions: bool = False,
499
+ output_hidden_states: bool = False,
500
+ return_dict: bool = True,
501
+ ):
502
+ return self.layers(
503
+ hidden_states=inputs_embeds,
504
+ attention_mask=attention_mask,
505
+ deterministic=deterministic,
506
+ output_attentions=output_attentions,
507
+ output_hidden_states=output_hidden_states,
508
+ return_dict=return_dict,
509
+ )
510
+
511
+
512
+ class FlaxCLIPTextTransformer(nn.Module):
513
+ config: CLIPTextConfig
514
+ dtype: jnp.dtype = jnp.float32
515
+
516
+ def setup(self):
517
+ self.embeddings = FlaxCLIPTextEmbeddings(self.config, dtype=self.dtype)
518
+ self.encoder = FlaxCLIPEncoder(self.config, dtype=self.dtype)
519
+ self.final_layer_norm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
520
+
521
+ # For `pooled_output` computation
522
+ self.eos_token_id = self.config.eos_token_id
523
+
524
+ def __call__(
525
+ self,
526
+ input_ids,
527
+ attention_mask,
528
+ position_ids,
529
+ deterministic: bool = True,
530
+ output_attentions: bool = False,
531
+ output_hidden_states: bool = False,
532
+ return_dict: bool = True,
533
+ ):
534
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
535
+ output_hidden_states = (
536
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
537
+ )
538
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
539
+
540
+ hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)
541
+
542
+ encoder_outputs = self.encoder(
543
+ inputs_embeds=hidden_states,
544
+ attention_mask=attention_mask,
545
+ deterministic=deterministic,
546
+ output_attentions=output_attentions,
547
+ output_hidden_states=output_hidden_states,
548
+ return_dict=return_dict,
549
+ )
550
+
551
+ last_hidden_state = encoder_outputs[0]
552
+ last_hidden_state = self.final_layer_norm(last_hidden_state)
553
+
554
+ if self.eos_token_id == 2:
555
+ # The `eos_token_id` was incorrect before PR #24773: let's keep what has been done here.
556
+ # A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added
557
+ # ------------------------------------------------------------
558
+ # text_embeds.shape = [batch_size, sequence_length, transformer.width]
559
+ # take features from the EOS embedding (eos_token_id is the highest number in each sequence)
560
+ pooled_output = last_hidden_state[jnp.arange(last_hidden_state.shape[0]), input_ids.argmax(axis=-1)]
561
+ else:
562
+ # (no need to cast from bool to int after comparing to `eos_token_id`)
563
+ pooled_output = last_hidden_state[
564
+ jnp.arange(last_hidden_state.shape[0]), (input_ids == self.eos_token_id).argmax(axis=-1)
565
+ ]
566
+
567
+ if not return_dict:
568
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
569
+
570
+ return FlaxBaseModelOutputWithPooling(
571
+ last_hidden_state=last_hidden_state,
572
+ pooler_output=pooled_output,
573
+ hidden_states=encoder_outputs.hidden_states,
574
+ attentions=encoder_outputs.attentions,
575
+ )
576
+
577
+
578
+ class FlaxCLIPVisionTransformer(nn.Module):
579
+ config: CLIPVisionConfig
580
+ dtype: jnp.dtype = jnp.float32
581
+
582
+ def setup(self):
583
+ self.embeddings = FlaxCLIPVisionEmbeddings(self.config, dtype=self.dtype)
584
+ self.pre_layrnorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
585
+ self.encoder = FlaxCLIPEncoder(self.config, dtype=self.dtype)
586
+ self.post_layernorm = nn.LayerNorm(epsilon=self.config.layer_norm_eps, dtype=self.dtype)
587
+
588
+ def __call__(
589
+ self,
590
+ pixel_values=None,
591
+ deterministic: bool = True,
592
+ output_attentions=None,
593
+ output_hidden_states=None,
594
+ return_dict: bool = True,
595
+ ):
596
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
597
+ output_hidden_states = (
598
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
599
+ )
600
+ return_dict = return_dict if return_dict is not None else self.config.use_return_dict
601
+
602
+ hidden_states = self.embeddings(pixel_values)
603
+ hidden_states = self.pre_layrnorm(hidden_states)
604
+
605
+ encoder_outputs = self.encoder(
606
+ inputs_embeds=hidden_states,
607
+ deterministic=deterministic,
608
+ output_attentions=output_attentions,
609
+ output_hidden_states=output_hidden_states,
610
+ return_dict=return_dict,
611
+ )
612
+
613
+ last_hidden_state = encoder_outputs[0]
614
+ pooled_output = last_hidden_state[:, 0, :]
615
+ pooled_output = self.post_layernorm(pooled_output)
616
+
617
+ if not return_dict:
618
+ return (last_hidden_state, pooled_output) + encoder_outputs[1:]
619
+
620
+ return FlaxBaseModelOutputWithPooling(
621
+ last_hidden_state=last_hidden_state,
622
+ pooler_output=pooled_output,
623
+ hidden_states=encoder_outputs.hidden_states,
624
+ attentions=encoder_outputs.attentions,
625
+ )
626
+
627
+
628
+ class FlaxCLIPTextPreTrainedModel(FlaxPreTrainedModel):
629
+ config_class = CLIPTextConfig
630
+ module_class: nn.Module = None
631
+
632
+ def __init__(
633
+ self,
634
+ config: CLIPTextConfig,
635
+ input_shape=(1, 1),
636
+ seed: int = 0,
637
+ dtype: jnp.dtype = jnp.float32,
638
+ _do_init: bool = True,
639
+ **kwargs,
640
+ ):
641
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
642
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
643
+
644
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
645
+ # init input tensor
646
+ input_ids = jnp.zeros(input_shape, dtype="i4")
647
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape)
648
+ attention_mask = jnp.ones_like(input_ids)
649
+
650
+ params_rng, dropout_rng = jax.random.split(rng)
651
+ rngs = {"params": params_rng, "dropout": dropout_rng}
652
+
653
+ random_params = self.module.init(rngs, input_ids, attention_mask, position_ids)["params"]
654
+
655
+ if params is not None:
656
+ random_params = flatten_dict(unfreeze(random_params))
657
+ params = flatten_dict(unfreeze(params))
658
+ for missing_key in self._missing_keys:
659
+ params[missing_key] = random_params[missing_key]
660
+ self._missing_keys = set()
661
+ return freeze(unflatten_dict(params))
662
+ else:
663
+ return random_params
664
+
665
+ def __call__(
666
+ self,
667
+ input_ids,
668
+ attention_mask=None,
669
+ position_ids=None,
670
+ params: dict = None,
671
+ dropout_rng: jax.random.PRNGKey = None,
672
+ train: bool = False,
673
+ output_attentions: Optional[bool] = None,
674
+ output_hidden_states: Optional[bool] = None,
675
+ return_dict: Optional[bool] = None,
676
+ ):
677
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
678
+ output_hidden_states = (
679
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
680
+ )
681
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
682
+
683
+ if position_ids is None:
684
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
685
+
686
+ if attention_mask is None:
687
+ attention_mask = jnp.ones_like(input_ids)
688
+
689
+ # Handle any PRNG if needed
690
+ rngs = {}
691
+ if dropout_rng is not None:
692
+ rngs["dropout"] = dropout_rng
693
+
694
+ return self.module.apply(
695
+ {"params": params or self.params},
696
+ jnp.array(input_ids, dtype="i4"),
697
+ jnp.array(attention_mask, dtype="i4"),
698
+ jnp.array(position_ids, dtype="i4"),
699
+ not train,
700
+ output_attentions,
701
+ output_hidden_states,
702
+ return_dict,
703
+ rngs=rngs,
704
+ )
705
+
706
+
707
+ class FlaxCLIPVisionPreTrainedModel(FlaxPreTrainedModel):
708
+ config_class = CLIPVisionConfig
709
+ main_input_name = "pixel_values"
710
+ module_class: nn.Module = None
711
+
712
+ def __init__(
713
+ self,
714
+ config: CLIPVisionConfig,
715
+ input_shape: Optional[Tuple] = None,
716
+ seed: int = 0,
717
+ dtype: jnp.dtype = jnp.float32,
718
+ _do_init: bool = True,
719
+ **kwargs,
720
+ ):
721
+ if input_shape is None:
722
+ input_shape = (1, config.image_size, config.image_size, 3)
723
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
724
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
725
+
726
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
727
+ # init input tensor
728
+ pixel_values = jax.random.normal(rng, input_shape)
729
+
730
+ params_rng, dropout_rng = jax.random.split(rng)
731
+ rngs = {"params": params_rng, "dropout": dropout_rng}
732
+
733
+ random_params = self.module.init(rngs, pixel_values)["params"]
734
+
735
+ if params is not None:
736
+ random_params = flatten_dict(unfreeze(random_params))
737
+ params = flatten_dict(unfreeze(params))
738
+ for missing_key in self._missing_keys:
739
+ params[missing_key] = random_params[missing_key]
740
+ self._missing_keys = set()
741
+ return freeze(unflatten_dict(params))
742
+ else:
743
+ return random_params
744
+
745
+ def __call__(
746
+ self,
747
+ pixel_values,
748
+ params: dict = None,
749
+ dropout_rng: jax.random.PRNGKey = None,
750
+ train: bool = False,
751
+ output_attentions: Optional[bool] = None,
752
+ output_hidden_states: Optional[bool] = None,
753
+ return_dict: Optional[bool] = None,
754
+ ):
755
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
756
+ output_hidden_states = (
757
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
758
+ )
759
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
760
+
761
+ pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1))
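+ # (the transpose above converts PyTorch-style NCHW inputs to the NHWC layout expected by the Flax vision module)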
762
+
763
+ # Handle any PRNG if needed
764
+ rngs = {}
765
+ if dropout_rng is not None:
766
+ rngs["dropout"] = dropout_rng
767
+
768
+ return self.module.apply(
769
+ {"params": params or self.params},
770
+ jnp.array(pixel_values, dtype=jnp.float32),
771
+ not train,
772
+ output_attentions,
773
+ output_hidden_states,
774
+ return_dict,
775
+ rngs=rngs,
776
+ )
777
+
778
+
779
+ class FlaxCLIPPreTrainedModel(FlaxPreTrainedModel):
780
+ config_class = CLIPConfig
781
+ module_class: nn.Module = None
782
+
783
+ def __init__(
784
+ self,
785
+ config: CLIPConfig,
786
+ input_shape: Optional[Tuple] = None,
787
+ seed: int = 0,
788
+ dtype: jnp.dtype = jnp.float32,
789
+ _do_init: bool = True,
790
+ **kwargs,
791
+ ):
792
+ if input_shape is None:
793
+ input_shape = ((1, 1), (1, config.vision_config.image_size, config.vision_config.image_size, 3))
794
+ module = self.module_class(config=config, dtype=dtype, **kwargs)
795
+ super().__init__(config, module, input_shape=input_shape, seed=seed, dtype=dtype, _do_init=_do_init)
796
+
797
+ def init_weights(self, rng: jax.random.PRNGKey, input_shape: Tuple, params: FrozenDict = None) -> FrozenDict:
798
+ # init input tensor
799
+ input_ids = jnp.zeros(input_shape[0], dtype="i4")
800
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_shape[0])
801
+ attention_mask = jnp.ones_like(input_ids)
802
+
803
+ pixel_values = jax.random.normal(rng, input_shape[1])
804
+
805
+ params_rng, dropout_rng = jax.random.split(rng)
806
+ rngs = {"params": params_rng, "dropout": dropout_rng}
807
+
808
+ random_params = self.module.init(rngs, input_ids, pixel_values, attention_mask, position_ids)["params"]
809
+
810
+ if params is not None:
811
+ random_params = flatten_dict(unfreeze(random_params))
812
+ params = flatten_dict(unfreeze(params))
813
+ for missing_key in self._missing_keys:
814
+ params[missing_key] = random_params[missing_key]
815
+ self._missing_keys = set()
816
+ return freeze(unflatten_dict(params))
817
+ else:
818
+ return random_params
819
+
820
+ def __call__(
821
+ self,
822
+ input_ids,
823
+ pixel_values,
824
+ attention_mask=None,
825
+ position_ids=None,
826
+ params: dict = None,
827
+ dropout_rng: jax.random.PRNGKey = None,
828
+ train: bool = False,
829
+ output_attentions: Optional[bool] = None,
830
+ output_hidden_states: Optional[bool] = None,
831
+ return_dict: Optional[bool] = None,
832
+ ):
833
+ output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
834
+ output_hidden_states = (
835
+ output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
836
+ )
837
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
838
+
839
+ if position_ids is None:
840
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
841
+
842
+ if attention_mask is None:
843
+ attention_mask = jnp.ones_like(input_ids)
844
+
845
+ pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1))
846
+
847
+ # Handle any PRNG if needed
848
+ rngs = {}
849
+ if dropout_rng is not None:
850
+ rngs["dropout"] = dropout_rng
851
+
852
+ return self.module.apply(
853
+ {"params": params or self.params},
854
+ jnp.array(input_ids, dtype="i4"),
855
+ jnp.array(pixel_values, dtype=jnp.float32),
856
+ jnp.array(attention_mask, dtype="i4"),
857
+ jnp.array(position_ids, dtype="i4"),
858
+ not train,
859
+ output_attentions,
860
+ output_hidden_states,
861
+ return_dict,
862
+ rngs=rngs,
863
+ )
864
+
865
+ def get_text_features(
866
+ self,
867
+ input_ids,
868
+ attention_mask=None,
869
+ position_ids=None,
870
+ params: dict = None,
871
+ dropout_rng: jax.random.PRNGKey = None,
872
+ train=False,
873
+ ):
874
+ r"""
875
+ Args:
876
+ input_ids (`numpy.ndarray` of shape `(batch_size, sequence_length)`):
877
+ Indices of input sequence tokens in the vocabulary. Padding will be ignored by default should you
878
+ provide it.
879
+
880
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.encode`] and
881
+ [`PreTrainedTokenizer.__call__`] for details.
882
+
883
+ [What are input IDs?](../glossary#input-ids)
884
+
885
+ Returns:
886
+ text_features (`jnp.ndarray` of shape `(batch_size, output_dim)`): The text embeddings obtained by applying
887
+ the projection layer to the pooled output of [`FlaxCLIPTextModel`].
888
+
889
+ Examples:
890
+
891
+ ```python
892
+ >>> from transformers import AutoTokenizer, FlaxCLIPModel
893
+
894
+ >>> model = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32")
895
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")
896
+
897
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="np")
898
+ >>> text_features = model.get_text_features(**inputs)
899
+ ```"""
900
+ if position_ids is None:
901
+ position_ids = jnp.broadcast_to(jnp.arange(jnp.atleast_2d(input_ids).shape[-1]), input_ids.shape)
902
+
903
+ if attention_mask is None:
904
+ attention_mask = jnp.ones_like(input_ids)
905
+
906
+ # Handle any PRNG if needed
907
+ rngs = {}
908
+ if dropout_rng is not None:
909
+ rngs["dropout"] = dropout_rng
910
+
911
+ def _get_features(module, input_ids, attention_mask, position_ids, deterministic):
912
+ text_outputs = module.text_model(
913
+ input_ids=input_ids,
914
+ attention_mask=attention_mask,
915
+ position_ids=position_ids,
916
+ deterministic=deterministic,
917
+ )
918
+ pooled_output = text_outputs[1]
919
+ text_features = module.text_projection(pooled_output)
920
+ return text_features
921
+
922
+ return self.module.apply(
923
+ {"params": params or self.params},
924
+ jnp.array(input_ids, dtype="i4"),
925
+ jnp.array(attention_mask, dtype="i4"),
926
+ jnp.array(position_ids, dtype="i4"),
927
+ not train,
928
+ method=_get_features,
929
+ rngs=rngs,
930
+ )
931
+
932
+ def get_image_features(
933
+ self, pixel_values, params: dict = None, dropout_rng: jax.random.PRNGKey = None, train=False
934
+ ):
935
+ r"""
936
+ Args:
937
+ pixel_values (`numpy.ndarray` of shape `(batch_size, num_channels, height, width)`):
938
+ Pixel values. Padding will be ignored by default should you provide it. Pixel values can be obtained
939
+ using [`AutoImageProcessor`]. See [`CLIPImageProcessor.__call__`] for details.
940
+
941
+ Returns:
942
+ image_features (`jnp.ndarray` of shape `(batch_size, output_dim)`): The image embeddings obtained by
943
+ applying the projection layer to the pooled output of [`FlaxCLIPVisionModel`]
944
+
945
+ Examples:
946
+
947
+ ```python
948
+ >>> from PIL import Image
949
+ >>> import requests
950
+ >>> from transformers import AutoProcessor, FlaxCLIPModel
951
+
952
+ >>> model = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32")
953
+ >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
954
+
955
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
956
+ >>> image = Image.open(requests.get(url, stream=True).raw)
957
+
958
+ >>> inputs = processor(images=image, return_tensors="np")
959
+
960
+ >>> image_features = model.get_image_features(**inputs)
961
+ ```"""
962
+ pixel_values = jnp.transpose(pixel_values, (0, 2, 3, 1))
963
+
964
+ # Handle any PRNG if needed
965
+ rngs = {}
966
+ if dropout_rng is not None:
967
+ rngs["dropout"] = dropout_rng
968
+
969
+ def _get_features(module, pixel_values, deterministic):
970
+ vision_outputs = module.vision_model(pixel_values=pixel_values, deterministic=deterministic)
971
+ pooled_output = vision_outputs[1] # pooled_output
972
+ image_features = module.visual_projection(pooled_output)
973
+ return image_features
974
+
975
+ return self.module.apply(
976
+ {"params": params or self.params},
977
+ jnp.array(pixel_values, dtype=jnp.float32),
978
+ not train,
979
+ method=_get_features,
980
+ rngs=rngs,
981
+ )
982
+
983
+
984
+ class FlaxCLIPTextModule(nn.Module):
985
+ config: CLIPTextConfig
986
+ dtype: jnp.dtype = jnp.float32
987
+
988
+ def setup(self):
989
+ self.text_model = FlaxCLIPTextTransformer(self.config, dtype=self.dtype)
990
+
991
+ def __call__(
992
+ self,
993
+ input_ids,
994
+ attention_mask,
995
+ position_ids,
996
+ deterministic: bool = True,
997
+ output_attentions: bool = False,
998
+ output_hidden_states: bool = False,
999
+ return_dict: bool = True,
1000
+ ):
1001
+ return self.text_model(
1002
+ input_ids=input_ids,
1003
+ attention_mask=attention_mask,
1004
+ position_ids=position_ids,
1005
+ deterministic=deterministic,
1006
+ output_attentions=output_attentions,
1007
+ output_hidden_states=output_hidden_states,
1008
+ return_dict=return_dict,
1009
+ )
1010
+
1011
+
1012
+ class FlaxCLIPTextModel(FlaxCLIPTextPreTrainedModel):
1013
+ module_class = FlaxCLIPTextModule
1014
+
1015
+
1016
+ FLAX_CLIP_TEXT_MODEL_DOCSTRING = """
1017
+ Returns:
1018
+
1019
+ Example:
1020
+
1021
+ ```python
1022
+ >>> from transformers import AutoTokenizer, FlaxCLIPTextModel
1023
+
1024
+ >>> model = FlaxCLIPTextModel.from_pretrained("openai/clip-vit-base-patch32")
1025
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")
1026
+
1027
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="np")
1028
+
1029
+ >>> outputs = model(**inputs)
1030
+ >>> last_hidden_state = outputs.last_hidden_state
1031
+ >>> pooler_output = outputs.pooler_output # pooled (EOS token) states
1032
+ ```
1033
+ """
1034
+
1035
+ overwrite_call_docstring(FlaxCLIPTextModel, CLIP_TEXT_INPUTS_DOCSTRING + FLAX_CLIP_TEXT_MODEL_DOCSTRING)
1036
+ append_replace_return_docstrings(
1037
+ FlaxCLIPTextModel, output_type=FlaxBaseModelOutputWithPooling, config_class=CLIPTextConfig
1038
+ )
1039
+
1040
+
1041
+ class FlaxCLIPTextModelWithProjectionModule(nn.Module):
1042
+ config: CLIPTextConfig
1043
+ dtype: jnp.dtype = jnp.float32
1044
+
1045
+ def setup(self):
1046
+ self.text_model = FlaxCLIPTextTransformer(self.config, dtype=self.dtype)
1047
+ self.text_projection = nn.Dense(self.config.projection_dim, use_bias=False, dtype=self.dtype)
1048
+
1049
+ def __call__(
1050
+ self,
1051
+ input_ids,
1052
+ attention_mask,
1053
+ position_ids,
1054
+ deterministic: bool = True,
1055
+ output_attentions: bool = False,
1056
+ output_hidden_states: bool = False,
1057
+ return_dict: bool = True,
1058
+ ):
1059
+ text_outputs = self.text_model(
1060
+ input_ids=input_ids,
1061
+ attention_mask=attention_mask,
1062
+ position_ids=position_ids,
1063
+ deterministic=deterministic,
1064
+ output_attentions=output_attentions,
1065
+ output_hidden_states=output_hidden_states,
1066
+ return_dict=return_dict,
1067
+ )
1068
+
1069
+ pooled_output = text_outputs[1]
1070
+ text_embeds = self.text_projection(pooled_output)
1071
+
1072
+ if not return_dict:
1073
+ return (text_embeds, text_outputs[0]) + text_outputs[2:]
1074
+
1075
+ return FlaxCLIPTextModelOutput(
1076
+ text_embeds=text_embeds,
1077
+ last_hidden_state=text_outputs.last_hidden_state,
1078
+ hidden_states=text_outputs.hidden_states,
1079
+ attentions=text_outputs.attentions,
1080
+ )
1081
+
1082
+
1083
+ class FlaxCLIPTextModelWithProjection(FlaxCLIPTextPreTrainedModel):
1084
+ module_class = FlaxCLIPTextModelWithProjectionModule
1085
+
1086
+
1087
+ FLAX_CLIP_TEXT_MODEL_WITH_PROJECTION_DOCSTRING = """
1088
+ Returns:
1089
+
1090
+ Example:
1091
+
1092
+ ```python
1093
+ >>> from transformers import AutoTokenizer, FlaxCLIPTextModelWithProjection
1094
+
1095
+ >>> model = FlaxCLIPTextModelWithProjection.from_pretrained("openai/clip-vit-base-patch32")
1096
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")
1097
+
1098
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="np")
1099
+
1100
+ >>> outputs = model(**inputs)
1101
+ >>> text_embeds = outputs.text_embeds
1102
+ ```
1103
+ """
1104
+
1105
+ overwrite_call_docstring(
1106
+ FlaxCLIPTextModelWithProjection, CLIP_TEXT_INPUTS_DOCSTRING + FLAX_CLIP_TEXT_MODEL_WITH_PROJECTION_DOCSTRING
1107
+ )
1108
+ append_replace_return_docstrings(
1109
+ FlaxCLIPTextModelWithProjection, output_type=FlaxCLIPTextModelOutput, config_class=CLIPTextConfig
1110
+ )
1111
+
1112
+
1113
+ class FlaxCLIPVisionModule(nn.Module):
1114
+ config: CLIPVisionConfig
1115
+ dtype: jnp.dtype = jnp.float32
1116
+
1117
+ def setup(self):
1118
+ self.vision_model = FlaxCLIPVisionTransformer(self.config, dtype=self.dtype)
1119
+
1120
+ def __call__(
1121
+ self,
1122
+ pixel_values,
1123
+ deterministic: bool = True,
1124
+ output_attentions: bool = False,
1125
+ output_hidden_states: bool = False,
1126
+ return_dict: bool = True,
1127
+ ):
1128
+ return self.vision_model(
1129
+ pixel_values=pixel_values,
1130
+ deterministic=deterministic,
1131
+ output_attentions=output_attentions,
1132
+ output_hidden_states=output_hidden_states,
1133
+ return_dict=return_dict,
1134
+ )
1135
+
1136
+
1137
+ class FlaxCLIPVisionModel(FlaxCLIPVisionPreTrainedModel):
1138
+ module_class = FlaxCLIPVisionModule
1139
+
1140
+
1141
+ FLAX_CLIP_VISION_MODEL_DOCSTRING = """
1142
+ Returns:
1143
+
1144
+ Example:
1145
+
1146
+ ```python
1147
+ >>> from PIL import Image
1148
+ >>> import requests
1149
+ >>> from transformers import AutoProcessor, FlaxCLIPVisionModel
1150
+
1151
+ >>> model = FlaxCLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32")
1152
+ >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
1153
+
1154
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1155
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1156
+
1157
+ >>> inputs = processor(images=image, return_tensors="np")
1158
+
1159
+ >>> outputs = model(**inputs)
1160
+ >>> last_hidden_state = outputs.last_hidden_state
1161
+ >>> pooler_output = outputs.pooler_output # pooled CLS states
1162
+ ```
1163
+ """
1164
+
1165
+ overwrite_call_docstring(FlaxCLIPVisionModel, CLIP_VISION_INPUTS_DOCSTRING + FLAX_CLIP_VISION_MODEL_DOCSTRING)
1166
+ append_replace_return_docstrings(
1167
+ FlaxCLIPVisionModel, output_type=FlaxBaseModelOutputWithPooling, config_class=CLIPVisionConfig
1168
+ )
1169
+
1170
+
1171
+ class FlaxCLIPModule(nn.Module):
1172
+ config: CLIPConfig
1173
+ dtype: jnp.dtype = jnp.float32
1174
+
1175
+ def setup(self):
1176
+ text_config = self.config.text_config
1177
+ vision_config = self.config.vision_config
1178
+
1179
+ self.projection_dim = self.config.projection_dim
1180
+ self.text_embed_dim = text_config.hidden_size
1181
+ self.vision_embed_dim = vision_config.hidden_size
1182
+
1183
+ self.text_model = FlaxCLIPTextTransformer(text_config, dtype=self.dtype)
1184
+ self.vision_model = FlaxCLIPVisionTransformer(vision_config, dtype=self.dtype)
1185
+
1186
+ self.visual_projection = nn.Dense(
1187
+ self.projection_dim,
1188
+ dtype=self.dtype,
1189
+ kernel_init=jax.nn.initializers.normal(0.02),
1190
+ use_bias=False,
1191
+ )
1192
+ self.text_projection = nn.Dense(
1193
+ self.projection_dim,
1194
+ dtype=self.dtype,
1195
+ kernel_init=jax.nn.initializers.normal(0.02),
1196
+ use_bias=False,
1197
+ )
1198
+
1199
+ self.logit_scale = self.param(
1200
+ "logit_scale", lambda _, shape: jnp.ones(shape) * self.config.logit_scale_init_value, []
1201
+ )
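+ # (`logit_scale` is the learnable temperature; it is stored in log space and exponentiated
+ # in `__call__` before scaling the cosine similarities.)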
1202
+
1203
+ def __call__(
1204
+ self,
1205
+ input_ids=None,
1206
+ pixel_values=None,
1207
+ attention_mask=None,
1208
+ position_ids=None,
1209
+ deterministic: bool = True,
1210
+ output_attentions=None,
1211
+ output_hidden_states=None,
1212
+ return_dict=None,
1213
+ ):
1214
+ return_dict = return_dict if return_dict is not None else self.config.return_dict
1215
+
1216
+ vision_outputs = self.vision_model(
1217
+ pixel_values=pixel_values,
1218
+ deterministic=deterministic,
1219
+ output_attentions=output_attentions,
1220
+ output_hidden_states=output_hidden_states,
1221
+ return_dict=return_dict,
1222
+ )
1223
+
1224
+ text_outputs = self.text_model(
1225
+ input_ids=input_ids,
1226
+ attention_mask=attention_mask,
1227
+ position_ids=position_ids,
1228
+ deterministic=deterministic,
1229
+ output_attentions=output_attentions,
1230
+ output_hidden_states=output_hidden_states,
1231
+ return_dict=return_dict,
1232
+ )
1233
+
1234
+ image_embeds = vision_outputs[1]
1235
+ image_embeds = self.visual_projection(image_embeds)
1236
+
1237
+ text_embeds = text_outputs[1]
1238
+ text_embeds = self.text_projection(text_embeds)
1239
+
1240
+ # normalized features
1241
+ image_embeds = image_embeds / jnp.linalg.norm(image_embeds, axis=-1, keepdims=True)
1242
+ text_embeds = text_embeds / jnp.linalg.norm(text_embeds, axis=-1, keepdims=True)
1243
+
1244
+ # cosine similarity as logits
1245
+ logit_scale = jnp.exp(self.logit_scale)
1246
+ logits_per_text = jnp.matmul(text_embeds, image_embeds.T) * logit_scale
1247
+ logits_per_image = logits_per_text.T
1248
+
1249
+ if not return_dict:
1250
+ return (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
1251
+
1252
+ return FlaxCLIPOutput(
1253
+ logits_per_image=logits_per_image,
1254
+ logits_per_text=logits_per_text,
1255
+ text_embeds=text_embeds,
1256
+ image_embeds=image_embeds,
1257
+ text_model_output=text_outputs,
1258
+ vision_model_output=vision_outputs,
1259
+ )
1260
+
1261
+
1262
+ @add_start_docstrings(CLIP_START_DOCSTRING)
1263
+ class FlaxCLIPModel(FlaxCLIPPreTrainedModel):
1264
+ module_class = FlaxCLIPModule
1265
+
1266
+
1267
+ FLAX_CLIP_MODEL_DOCSTRING = """
1268
+ Returns:
1269
+
1270
+ Example:
1271
+
1272
+ ```python
1273
+ >>> import jax
1274
+ >>> from PIL import Image
1275
+ >>> import requests
1276
+ >>> from transformers import AutoProcessor, FlaxCLIPModel
1277
+
1278
+ >>> model = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32")
1279
+ >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
1280
+
1281
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1282
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1283
+
1284
+ >>> inputs = processor(
1285
+ ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="np", padding=True
1286
+ ... )
1287
+
1288
+ >>> outputs = model(**inputs)
1289
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
1290
+ >>> probs = jax.nn.softmax(logits_per_image, axis=1) # we can take the softmax to get the label probabilities
1291
+ ```
1292
+ """
1293
+
1294
+ overwrite_call_docstring(FlaxCLIPModel, CLIP_INPUTS_DOCSTRING + FLAX_CLIP_MODEL_DOCSTRING)
1295
+ append_replace_return_docstrings(FlaxCLIPModel, output_type=FlaxCLIPOutput, config_class=CLIPConfig)
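
The Flax CLIP classes above expose the same public surface as the PyTorch model: `get_text_features` / `get_image_features` return the projected (unnormalized) embeddings, while `FlaxCLIPModule.__call__` L2-normalizes them and scales the cosine similarities by `exp(logit_scale)`. The snippet below is a minimal illustrative sketch (not part of the diffed file) that reproduces `logits_per_image` from those two feature methods; it assumes the flat `params["logit_scale"]` entry created in `FlaxCLIPModule.setup`.

```python
import jax.numpy as jnp
import requests
from PIL import Image
from transformers import AutoProcessor, FlaxCLIPModel

model = FlaxCLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

image = Image.open(
    requests.get("http://images.cocodataset.org/val2017/000000039769.jpg", stream=True).raw
)
inputs = processor(
    text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="np", padding=True
)

# Projected embeddings, before L2 normalization
text_embeds = model.get_text_features(
    input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"]
)
image_embeds = model.get_image_features(pixel_values=inputs["pixel_values"])

# Same normalization and temperature scaling as FlaxCLIPModule.__call__
text_embeds = text_embeds / jnp.linalg.norm(text_embeds, axis=-1, keepdims=True)
image_embeds = image_embeds / jnp.linalg.norm(image_embeds, axis=-1, keepdims=True)
logits_per_image = jnp.exp(model.params["logit_scale"]) * image_embeds @ text_embeds.T
```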
venv/lib/python3.10/site-packages/transformers/models/clip/modeling_tf_clip.py ADDED
@@ -0,0 +1,1461 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The OpenAI Team Authors and The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """ TF 2.0 CLIP model."""
16
+
17
+
18
+ from __future__ import annotations
19
+
20
+ import math
21
+ from dataclasses import dataclass
22
+ from typing import Any, Optional, Tuple, Union
23
+
24
+ import numpy as np
25
+ import tensorflow as tf
26
+
27
+ from ...activations_tf import get_tf_activation
28
+ from ...modeling_tf_outputs import TFBaseModelOutput, TFBaseModelOutputWithPooling
29
+
30
+ # Public API
31
+ from ...modeling_tf_utils import (
32
+ TFModelInputType,
33
+ TFPreTrainedModel,
34
+ get_initializer,
35
+ keras,
36
+ keras_serializable,
37
+ unpack_inputs,
38
+ )
39
+ from ...tf_utils import check_embeddings_within_bounds, shape_list, stable_softmax
40
+ from ...utils import (
41
+ ModelOutput,
42
+ add_start_docstrings,
43
+ add_start_docstrings_to_model_forward,
44
+ logging,
45
+ replace_return_docstrings,
46
+ )
47
+ from .configuration_clip import CLIPConfig, CLIPTextConfig, CLIPVisionConfig
48
+
49
+
50
+ logger = logging.get_logger(__name__)
51
+
52
+ _CHECKPOINT_FOR_DOC = "openai/clip-vit-base-patch32"
53
+
54
+
55
+ from ..deprecated._archive_maps import TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST # noqa: F401, E402
56
+
57
+
58
+ LARGE_NEGATIVE = -1e8
59
+
60
+
61
+ # Copied from transformers.models.bart.modeling_tf_bart._expand_mask
62
+ def _expand_mask(mask: tf.Tensor, tgt_len: Optional[int] = None):
63
+ """
64
+ Expands attention_mask from `[bsz, seq_len]` to `[bsz, 1, tgt_seq_len, src_seq_len]`.
65
+ """
66
+ src_len = shape_list(mask)[1]
67
+ tgt_len = tgt_len if tgt_len is not None else src_len
68
+ one_cst = tf.constant(1.0)
69
+ mask = tf.cast(mask, dtype=one_cst.dtype)
70
+ expanded_mask = tf.tile(mask[:, None, None, :], (1, 1, tgt_len, 1))
71
+
72
+ return (one_cst - expanded_mask) * LARGE_NEGATIVE
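+ # (the result is an additive mask: positions with `mask == 1` map to 0.0 while padded
+ # positions with `mask == 0` map to LARGE_NEGATIVE, so they are suppressed by the softmax)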
73
+
74
+
75
+ # contrastive loss function, adapted from
76
+ # https://sachinruk.github.io/blog/pytorch/pytorch%20lightning/loss%20function/gpu/2021/03/07/CLIP.html
77
+ def contrastive_loss(logits: tf.Tensor) -> tf.Tensor:
78
+ return tf.math.reduce_mean(
79
+ keras.metrics.sparse_categorical_crossentropy(
80
+ y_true=tf.range(shape_list(logits)[0]), y_pred=logits, from_logits=True
81
+ )
82
+ )
83
+
84
+
85
+ def clip_loss(similarity: tf.Tensor) -> tf.Tensor:
86
+ caption_loss = contrastive_loss(similarity)
87
+ image_loss = contrastive_loss(tf.transpose(similarity))
88
+ return (caption_loss + image_loss) / 2.0
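+ # (`clip_loss` is the symmetric contrastive objective: cross-entropy over the rows of the
+ # similarity matrix plus cross-entropy over the rows of its transpose, averaged, with index
+ # `i` as the target class for row `i`, i.e. the matching pair sits on the diagonal)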
89
+
90
+
91
+ @dataclass
92
+ class TFCLIPOutput(ModelOutput):
93
+ """
94
+ Args:
95
+ loss (`tf.Tensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
96
+ Contrastive loss for image-text similarity.
97
+ logits_per_image (`tf.Tensor` of shape `(image_batch_size, text_batch_size)`):
98
+ The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
99
+ similarity scores.
100
+ logits_per_text (`tf.Tensor` of shape `(text_batch_size, image_batch_size)`):
101
+ The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
102
+ similarity scores.
103
+ text_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`):
104
+ The text embeddings obtained by applying the projection layer to the pooled output of [`TFCLIPTextModel`].
105
+ image_embeds (`tf.Tensor` of shape `(batch_size, output_dim)`):
106
+ The image embeddings obtained by applying the projection layer to the pooled output of
107
+ [`TFCLIPVisionModel`].
108
+ text_model_output([`~modeling_tf_utils.TFBaseModelOutputWithPooling`]):
109
+ The output of the [`TFCLIPTextModel`].
110
+ vision_model_output([`~modeling_tf_utils.TFBaseModelOutputWithPooling`]):
111
+ The output of the [`TFCLIPVisionModel`].
112
+ """
113
+
114
+ loss: tf.Tensor | None = None
115
+ logits_per_image: tf.Tensor = None
116
+ logits_per_text: tf.Tensor = None
117
+ text_embeds: tf.Tensor = None
118
+ image_embeds: tf.Tensor = None
119
+ text_model_output: TFBaseModelOutputWithPooling = None
120
+ vision_model_output: TFBaseModelOutputWithPooling = None
121
+
122
+ def to_tuple(self) -> Tuple[Any]:
123
+ return tuple(
124
+ self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
125
+ for k in self.keys()
126
+ )
127
+
128
+
129
+ class TFCLIPVisionEmbeddings(keras.layers.Layer):
130
+ def __init__(self, config: CLIPVisionConfig, **kwargs):
131
+ super().__init__(**kwargs)
132
+
133
+ self.embed_dim = config.hidden_size
134
+ self.image_size = config.image_size
135
+ self.patch_size = config.patch_size
136
+
137
+ self.num_patches = (self.image_size // self.patch_size) ** 2
138
+ self.num_positions = self.num_patches + 1
139
+
140
+ self.config = config
141
+
142
+ self.patch_embedding = keras.layers.Conv2D(
143
+ filters=self.embed_dim,
144
+ kernel_size=self.patch_size,
145
+ strides=self.patch_size,
146
+ padding="valid",
147
+ data_format="channels_last",
148
+ use_bias=False,
149
+ kernel_initializer=get_initializer(self.config.initializer_range * self.config.initializer_factor),
150
+ name="patch_embedding",
151
+ )
152
+
153
+ def build(self, input_shape: tf.TensorShape = None):
154
+ factor = self.config.initializer_factor
155
+
156
+ self.class_embedding = self.add_weight(
157
+ shape=(self.embed_dim,),
158
+ initializer=get_initializer(self.embed_dim**-0.5 * factor),
159
+ trainable=True,
160
+ name="class_embedding",
161
+ )
162
+
163
+ with tf.name_scope("position_embedding"):
164
+ self.position_embedding = self.add_weight(
165
+ shape=(self.num_positions, self.embed_dim),
166
+ initializer=get_initializer(self.config.initializer_range * factor),
167
+ trainable=True,
168
+ name="embeddings",
169
+ )
170
+
171
+ if self.built:
172
+ return
173
+ self.built = True
174
+ if getattr(self, "patch_embedding", None) is not None:
175
+ with tf.name_scope(self.patch_embedding.name):
176
+ self.patch_embedding.build([None, None, None, self.config.num_channels])
177
+
178
+ def call(self, pixel_values: tf.Tensor) -> tf.Tensor:
179
+ """`pixel_values` is expected to be of NCHW format."""
180
+
181
+ batch_size, num_channels, height, width = shape_list(pixel_values)
182
+
183
+ # When running on CPU, `tf.nn.conv2d` doesn't support `NCHW` format.
184
+ # So change the input format from `NCHW` to `NHWC`.
185
+ # shape = (batch_size, in_height, in_width, in_channels=num_channels)
186
+ pixel_values = tf.transpose(pixel_values, perm=(0, 2, 3, 1))
187
+
188
+ patch_embeds = self.patch_embedding(pixel_values)
189
+
190
+ # Change the 2D spatial dimensions to a single temporal dimension.
191
+ # shape = (batch_size, num_patches, out_channels=embed_dim)
192
+ patch_embeds = tf.reshape(tensor=patch_embeds, shape=(batch_size, self.num_patches, -1))
193
+
194
+ # add the [CLS] token to the embedded patch tokens
195
+ class_embeds = tf.broadcast_to(self.class_embedding, shape=(batch_size, 1, self.embed_dim))
196
+ embeddings = tf.concat((class_embeds, patch_embeds), axis=1)
197
+
198
+ embeddings = embeddings + self.position_embedding
199
+
200
+ return embeddings
201
+
202
+
203
+ class TFCLIPTextEmbeddings(keras.layers.Layer):
204
+ def __init__(self, config: CLIPTextConfig, **kwargs):
205
+ super().__init__(**kwargs)
206
+
207
+ self.embed_dim = config.hidden_size
208
+
209
+ self.config = config
210
+
211
+ def build(self, input_shape: tf.TensorShape = None):
212
+ with tf.name_scope("token_embedding"):
213
+ self.weight = self.add_weight(
214
+ shape=(self.config.vocab_size, self.embed_dim),
215
+ initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range),
216
+ trainable=True,
217
+ name="weight",
218
+ )
219
+
220
+ with tf.name_scope("position_embedding"):
221
+ self.position_embedding = self.add_weight(
222
+ shape=(self.config.max_position_embeddings, self.embed_dim),
223
+ initializer=get_initializer(self.config.initializer_factor * self.config.initializer_range),
224
+ trainable=True,
225
+ name="embeddings",
226
+ )
227
+
228
+ super().build(input_shape)
229
+
230
+ def call(
231
+ self,
232
+ input_ids: tf.Tensor = None,
233
+ position_ids: tf.Tensor = None,
234
+ inputs_embeds: tf.Tensor = None,
235
+ ) -> tf.Tensor:
236
+ """
237
+ Applies embedding based on inputs tensor.
238
+
239
+ Returns:
240
+ final_embeddings (`tf.Tensor`): output embedding tensor.
241
+ """
242
+ if input_ids is None and inputs_embeds is None:
243
+ raise ValueError("You have to specify either input_ids or inputs_embeds")
244
+
245
+ if inputs_embeds is None:
246
+ check_embeddings_within_bounds(input_ids, self.config.vocab_size)
247
+ inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
248
+
249
+ input_shape = shape_list(inputs_embeds)[:-1]
250
+
251
+ if position_ids is None:
252
+ position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
253
+
254
+ position_embeds = tf.gather(params=self.position_embedding, indices=position_ids)
255
+ position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1))
256
+ final_embeddings = inputs_embeds + position_embeds
257
+
258
+ return final_embeddings
259
+
260
+
261
+ class TFCLIPAttention(keras.layers.Layer):
262
+ """Multi-headed attention from 'Attention Is All You Need' paper"""
263
+
264
+ def __init__(self, config: CLIPConfig, **kwargs):
265
+ super().__init__(**kwargs)
266
+
267
+ self.embed_dim = config.hidden_size
268
+ self.num_attention_heads = config.num_attention_heads
269
+ self.attention_head_size = self.embed_dim // self.num_attention_heads
270
+ if self.attention_head_size * self.num_attention_heads != self.embed_dim:
271
+ raise ValueError(
272
+ f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
273
+ f" {self.num_attention_heads})."
274
+ )
275
+
276
+ factor = config.initializer_factor
277
+ in_proj_std = (self.embed_dim**-0.5) * ((2 * config.num_hidden_layers) ** -0.5) * factor
278
+ out_proj_std = (self.embed_dim**-0.5) * factor
279
+
280
+ self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
281
+
282
+ self.q_proj = keras.layers.Dense(
283
+ units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="q_proj"
284
+ )
285
+ self.k_proj = keras.layers.Dense(
286
+ units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="k_proj"
287
+ )
288
+ self.v_proj = keras.layers.Dense(
289
+ units=self.embed_dim, kernel_initializer=get_initializer(in_proj_std), name="v_proj"
290
+ )
291
+
292
+ self.dropout = keras.layers.Dropout(rate=config.attention_dropout)
293
+
294
+ self.out_proj = keras.layers.Dense(
295
+ units=self.embed_dim, kernel_initializer=get_initializer(out_proj_std), name="out_proj"
296
+ )
297
+
298
+ # copied from transformers.models.bert.modeling_tf_bert.TFBertSelfAttention.transpose_for_scores
299
+ def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
300
+ # Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
301
+ tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
302
+
303
+ # Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
304
+ return tf.transpose(tensor, perm=[0, 2, 1, 3])
305
+
306
+ def call(
307
+ self,
308
+ hidden_states: tf.Tensor,
309
+ attention_mask: tf.Tensor,
310
+ causal_attention_mask: tf.Tensor,
311
+ output_attentions: bool,
312
+ training: bool = False,
313
+ ) -> Tuple[tf.Tensor]:
314
+ """Input shape: Batch x Time x Channel"""
315
+
316
+ batch_size = shape_list(hidden_states)[0]
317
+ mixed_query_layer = self.q_proj(inputs=hidden_states)
318
+ mixed_key_layer = self.k_proj(inputs=hidden_states)
319
+ mixed_value_layer = self.v_proj(inputs=hidden_states)
320
+ query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
321
+ key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
322
+ value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
323
+
324
+ # Take the dot product between "query" and "key" to get the raw attention scores.
325
+ # (batch size, num_heads, seq_len_q, seq_len_k)
326
+ attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
327
+ dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
328
+ attention_scores = tf.divide(attention_scores, dk)
329
+
330
+ # apply the causal_attention_mask first
331
+ if causal_attention_mask is not None:
332
+ # Apply the causal attention mask (precomputed for all layers in TFCLIPModel call() function)
333
+ attention_scores = tf.add(attention_scores, causal_attention_mask)
334
+
335
+ if attention_mask is not None:
336
+ # Apply the attention mask (precomputed for all layers in TFCLIPModel call() function)
337
+ attention_scores = tf.add(attention_scores, attention_mask)
338
+
339
+ # Normalize the attention scores to probabilities.
340
+ _attention_probs = stable_softmax(logits=attention_scores, axis=-1)
341
+
342
+ # This is actually dropping out entire tokens to attend to, which might
343
+ # seem a bit unusual, but is taken from the original Transformer paper.
344
+ attention_probs = self.dropout(inputs=_attention_probs, training=training)
345
+
346
+ attention_output = tf.matmul(attention_probs, value_layer)
347
+ attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
348
+
349
+ # (batch_size, seq_len_q, embed_dim)
350
+ attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.embed_dim))
351
+
352
+ attention_output = self.out_proj(attention_output, training=training)
353
+ # In TFBert, attention weights are returned after dropout.
354
+ # However, in CLIP, they are returned before dropout.
355
+ outputs = (attention_output, _attention_probs) if output_attentions else (attention_output,)
356
+
357
+ return outputs
358
+
359
+ def build(self, input_shape=None):
360
+ if self.built:
361
+ return
362
+ self.built = True
363
+ if getattr(self, "q_proj", None) is not None:
364
+ with tf.name_scope(self.q_proj.name):
365
+ self.q_proj.build([None, None, self.embed_dim])
366
+ if getattr(self, "k_proj", None) is not None:
367
+ with tf.name_scope(self.k_proj.name):
368
+ self.k_proj.build([None, None, self.embed_dim])
369
+ if getattr(self, "v_proj", None) is not None:
370
+ with tf.name_scope(self.v_proj.name):
371
+ self.v_proj.build([None, None, self.embed_dim])
372
+ if getattr(self, "out_proj", None) is not None:
373
+ with tf.name_scope(self.out_proj.name):
374
+ self.out_proj.build([None, None, self.embed_dim])
375
+
376
+
377
+ class TFCLIPMLP(keras.layers.Layer):
378
+ def __init__(self, config: CLIPConfig, **kwargs):
379
+ super().__init__(**kwargs)
380
+
381
+ self.activation_fn = get_tf_activation(config.hidden_act)
382
+
383
+ factor = config.initializer_factor
384
+ in_proj_std = (config.hidden_size**-0.5) * ((2 * config.num_hidden_layers) ** -0.5) * factor
385
+ fc_std = (2 * config.hidden_size) ** -0.5 * factor
386
+
387
+ self.fc1 = keras.layers.Dense(
388
+ units=config.intermediate_size, kernel_initializer=get_initializer(fc_std), name="fc1"
389
+ )
390
+ self.fc2 = keras.layers.Dense(
391
+ units=config.hidden_size, kernel_initializer=get_initializer(in_proj_std), name="fc2"
392
+ )
393
+ self.config = config
394
+
395
+ def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
396
+ hidden_states = self.fc1(inputs=hidden_states)
397
+ hidden_states = self.activation_fn(hidden_states)
398
+ hidden_states = self.fc2(inputs=hidden_states)
399
+ return hidden_states
400
+
401
+ def build(self, input_shape=None):
402
+ if self.built:
403
+ return
404
+ self.built = True
405
+ if getattr(self, "fc1", None) is not None:
406
+ with tf.name_scope(self.fc1.name):
407
+ self.fc1.build([None, None, self.config.hidden_size])
408
+ if getattr(self, "fc2", None) is not None:
409
+ with tf.name_scope(self.fc2.name):
410
+ self.fc2.build([None, None, self.config.intermediate_size])
411
+
412
+
413
+ class TFCLIPEncoderLayer(keras.layers.Layer):
414
+ def __init__(self, config: CLIPConfig, **kwargs):
415
+ super().__init__(**kwargs)
416
+
417
+ self.embed_dim = config.hidden_size
418
+ self.self_attn = TFCLIPAttention(config, name="self_attn")
419
+ self.layer_norm1 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm1")
420
+ self.mlp = TFCLIPMLP(config, name="mlp")
421
+ self.layer_norm2 = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="layer_norm2")
422
+
423
+ def call(
424
+ self,
425
+ hidden_states: tf.Tensor,
426
+ attention_mask: tf.Tensor,
427
+ causal_attention_mask: tf.Tensor,
428
+ output_attentions: bool,
429
+ training: bool = False,
430
+ ) -> Tuple[tf.Tensor]:
431
+ """
432
+ Args:
433
+ hidden_states (`tf.Tensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
434
+ attention_mask (`tf.Tensor`): attention mask of size
435
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
436
+ causal_attention_mask (`tf.Tensor`): causal attention mask of size
437
+ `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
438
+ output_attentions (`bool`):
439
+ Whether or not to return the attentions tensors of all attention layers. See `outputs` under returned
440
+ tensors for more detail.
441
+ """
442
+ residual = hidden_states
443
+
444
+ hidden_states = self.layer_norm1(inputs=hidden_states)
445
+ attention_outputs = self.self_attn(
446
+ hidden_states=hidden_states,
447
+ attention_mask=attention_mask,
448
+ causal_attention_mask=causal_attention_mask,
449
+ output_attentions=output_attentions,
450
+ training=training,
451
+ )
452
+ hidden_states = attention_outputs[0]
453
+ hidden_states = residual + hidden_states
454
+
455
+ residual = hidden_states
456
+ hidden_states = self.layer_norm2(inputs=hidden_states)
457
+ hidden_states = self.mlp(hidden_states=hidden_states)
458
+ hidden_states = residual + hidden_states
459
+
460
+ outputs = (hidden_states,) + attention_outputs[1:] # add attentions if we output them
461
+
462
+ return outputs
463
+
464
+ def build(self, input_shape=None):
465
+ if self.built:
466
+ return
467
+ self.built = True
468
+ if getattr(self, "self_attn", None) is not None:
469
+ with tf.name_scope(self.self_attn.name):
470
+ self.self_attn.build(None)
471
+ if getattr(self, "layer_norm1", None) is not None:
472
+ with tf.name_scope(self.layer_norm1.name):
473
+ self.layer_norm1.build([None, None, self.embed_dim])
474
+ if getattr(self, "mlp", None) is not None:
475
+ with tf.name_scope(self.mlp.name):
476
+ self.mlp.build(None)
477
+ if getattr(self, "layer_norm2", None) is not None:
478
+ with tf.name_scope(self.layer_norm2.name):
479
+ self.layer_norm2.build([None, None, self.embed_dim])
480
+
481
+
482
+ class TFCLIPEncoder(keras.layers.Layer):
483
+ """
484
+ Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
485
+ [`TFCLIPEncoderLayer`].
486
+
487
+ Args:
488
+ config: CLIPConfig
489
+ """
490
+
491
+ def __init__(self, config: CLIPConfig, **kwargs):
492
+ super().__init__(**kwargs)
493
+
494
+ self.layers = [TFCLIPEncoderLayer(config, name=f"layers_._{i}") for i in range(config.num_hidden_layers)]
495
+
496
+ def call(
497
+ self,
498
+ hidden_states: tf.Tensor,
499
+ attention_mask: tf.Tensor,
500
+ causal_attention_mask: tf.Tensor,
501
+ output_attentions: bool,
502
+ output_hidden_states: bool,
503
+ return_dict: bool,
504
+ training: bool = False,
505
+ ) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
506
+ all_hidden_states = () if output_hidden_states else None
507
+ all_attentions = () if output_attentions else None
508
+
509
+ for i, layer_module in enumerate(self.layers):
510
+ if output_hidden_states:
511
+ all_hidden_states = all_hidden_states + (hidden_states,)
512
+
513
+ layer_outputs = layer_module(
514
+ hidden_states=hidden_states,
515
+ attention_mask=attention_mask,
516
+ causal_attention_mask=causal_attention_mask,
517
+ output_attentions=output_attentions,
518
+ training=training,
519
+ )
520
+ hidden_states = layer_outputs[0]
521
+
522
+ if output_attentions:
523
+ all_attentions = all_attentions + (layer_outputs[1],)
524
+
525
+ # Add last layer
526
+ if output_hidden_states:
527
+ all_hidden_states = all_hidden_states + (hidden_states,)
528
+
529
+ if not return_dict:
530
+ return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
531
+
532
+ return TFBaseModelOutput(
533
+ last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
534
+ )
535
+
536
+ def build(self, input_shape=None):
537
+ if self.built:
538
+ return
539
+ self.built = True
540
+ if getattr(self, "layers", None) is not None:
541
+ for layer in self.layers:
542
+ with tf.name_scope(layer.name):
543
+ layer.build(None)
544
+
545
+
546
+ class TFCLIPTextTransformer(keras.layers.Layer):
547
+ def __init__(self, config: CLIPTextConfig, **kwargs):
548
+ super().__init__(**kwargs)
549
+
550
+ self.embeddings = TFCLIPTextEmbeddings(config, name="embeddings")
551
+ self.encoder = TFCLIPEncoder(config, name="encoder")
552
+ self.final_layer_norm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="final_layer_norm")
553
+
554
+ # For `pooled_output` computation
555
+ self.eos_token_id = config.eos_token_id
556
+ self.embed_dim = config.hidden_size
557
+
558
+ def call(
559
+ self,
560
+ input_ids: TFModelInputType,
561
+ attention_mask: tf.Tensor,
562
+ position_ids: tf.Tensor,
563
+ output_attentions: bool,
564
+ output_hidden_states: bool,
565
+ return_dict: bool,
566
+ training: bool = False,
567
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
568
+ input_shape = shape_list(input_ids)
569
+
570
+ embedding_output = self.embeddings(input_ids=input_ids, position_ids=position_ids)
571
+
572
+ batch_size, seq_length = input_shape
573
+ # CLIP's text model uses causal mask, prepare it here.
574
+ # https://github.com/openai/CLIP/blob/cfcffb90e69f37bf2ff1e988237a0fbe41f33c04/clip/model.py#L324
575
+ causal_attention_mask = self._build_causal_attention_mask(batch_size, seq_length, dtype=embedding_output.dtype)
576
+
577
+ # check attention mask and invert
578
+ # [bsz, seq_len] -> [bsz, 1, tgt_seq_len, src_seq_len]
579
+ attention_mask = _expand_mask(attention_mask)
580
+
581
+ encoder_outputs = self.encoder(
582
+ hidden_states=embedding_output,
583
+ attention_mask=attention_mask,
584
+ causal_attention_mask=causal_attention_mask,
585
+ output_attentions=output_attentions,
586
+ output_hidden_states=output_hidden_states,
587
+ return_dict=return_dict,
588
+ training=training,
589
+ )
590
+
591
+ sequence_output = encoder_outputs[0]
592
+ sequence_output = self.final_layer_norm(inputs=sequence_output)
593
+
594
+ if self.eos_token_id == 2:
595
+ # The `eos_token_id` was incorrect before PR #24773: let's keep what has been done here.
596
+ # A CLIP model with such `eos_token_id` in the config can't work correctly with extra new tokens added
597
+ # ------------------------------------------------------------
598
+ # text_embeds.shape = [batch_size, n_ctx, transformer.width]
599
+ # take features from the eot embedding (eot_token is the highest number in each sequence)
600
+ pooled_output = tf.gather_nd(
601
+ params=sequence_output,
602
+ indices=tf.stack(
603
+ values=(tf.range(input_shape[0], dtype=tf.int64), tf.math.argmax(input_ids, axis=-1)), axis=1
604
+ ),
605
+ )
606
+ else:
607
+ # The config gets the updated `eos_token_id` from PR #24773 (so the use of extra new tokens is possible)
608
+ pooled_output = tf.gather_nd(
609
+ params=sequence_output,
610
+ indices=tf.stack(
611
+ values=(
612
+ tf.range(input_shape[0], dtype=tf.int64),
613
+ tf.math.argmax(tf.cast(input_ids == self.eos_token_id, dtype=tf.int8), axis=-1),
614
+ ),
615
+ axis=1,
616
+ ),
617
+ )
618
+
619
+ if not return_dict:
620
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
621
+
622
+ return TFBaseModelOutputWithPooling(
623
+ last_hidden_state=sequence_output,
624
+ pooler_output=pooled_output,
625
+ hidden_states=encoder_outputs.hidden_states,
626
+ attentions=encoder_outputs.attentions,
627
+ )
628
+
629
+ def _build_causal_attention_mask(self, batch_size, seq_length, dtype=tf.float32):
630
+ # It is possible with an unspecified sequence length for seq_length to be
631
+ # a runtime value, which is unsupported by tf.constant. Per the TensorFlow
632
+ # docs, tf.fill can handle runtime dynamic shapes:
633
+ # https://www.tensorflow.org/api_docs/python/tf/fill
634
+ diag = tf.cast(tf.fill((seq_length,), 0.0), dtype)
635
+
636
+ # set an additive 2D attention mask with all places being masked
637
+ to_mask = tf.cast(tf.fill((seq_length, seq_length), -10000.0), dtype)
638
+
639
+ # set diagonal & lower triangular parts to 0 (i.e. the places not to be masked)
640
+ # TIP: think the 2D matrix as the space of (query_seq, key_seq)
641
+ to_mask = tf.linalg.band_part(to_mask, 0, -1)
642
+ # to_mask = tf.linalg.band_part(to_mask, -1, 0)
643
+ to_mask = tf.linalg.set_diag(to_mask, diagonal=diag)
644
+
645
+ return tf.broadcast_to(input=to_mask, shape=(batch_size, 1, seq_length, seq_length))
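+ # (e.g. for seq_length == 3 every head receives the additive mask
+ # [[0, -1e4, -1e4], [0, 0, -1e4], [0, 0, 0]]: each position attends to itself and to
+ # earlier positions, while future positions are pushed to -10000 before the softmax)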
646
+
647
+ def build(self, input_shape=None):
648
+ if self.built:
649
+ return
650
+ self.built = True
651
+ if getattr(self, "embeddings", None) is not None:
652
+ with tf.name_scope(self.embeddings.name):
653
+ self.embeddings.build(None)
654
+ if getattr(self, "encoder", None) is not None:
655
+ with tf.name_scope(self.encoder.name):
656
+ self.encoder.build(None)
657
+ if getattr(self, "final_layer_norm", None) is not None:
658
+ with tf.name_scope(self.final_layer_norm.name):
659
+ self.final_layer_norm.build([None, None, self.embed_dim])
660
+
661
+
662
+ @keras_serializable
663
+ class TFCLIPTextMainLayer(keras.layers.Layer):
664
+ config_class = CLIPTextConfig
665
+
666
+ def __init__(self, config: CLIPTextConfig, **kwargs):
667
+ super().__init__(**kwargs)
668
+ self.config = config
669
+ self.text_model = TFCLIPTextTransformer(config, name="text_model")
670
+
671
+ def get_input_embeddings(self) -> keras.layers.Layer:
672
+ return self.text_model.embeddings
673
+
674
+ def set_input_embeddings(self, value: tf.Variable):
675
+ self.text_model.embeddings.weight = value
676
+ self.text_model.embeddings.vocab_size = shape_list(value)[0]
677
+
678
+ @unpack_inputs
679
+ def call(
680
+ self,
681
+ input_ids: TFModelInputType | None = None,
682
+ attention_mask: np.ndarray | tf.Tensor | None = None,
683
+ position_ids: np.ndarray | tf.Tensor | None = None,
684
+ output_attentions: Optional[bool] = None,
685
+ output_hidden_states: Optional[bool] = None,
686
+ return_dict: Optional[bool] = None,
687
+ training: bool = False,
688
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
689
+ if input_ids is None:
690
+ raise ValueError("You have to specify input_ids")
691
+
692
+ input_shape = shape_list(input_ids)
693
+
694
+ if attention_mask is None:
695
+ attention_mask = tf.fill(dims=input_shape, value=1)
696
+
697
+ text_model_outputs = self.text_model(
698
+ input_ids=input_ids,
699
+ attention_mask=attention_mask,
700
+ position_ids=position_ids,
701
+ output_attentions=output_attentions,
702
+ output_hidden_states=output_hidden_states,
703
+ return_dict=return_dict,
704
+ training=training,
705
+ )
706
+
707
+ return text_model_outputs
708
+
709
+ def build(self, input_shape=None):
710
+ if self.built:
711
+ return
712
+ self.built = True
713
+ if getattr(self, "text_model", None) is not None:
714
+ with tf.name_scope(self.text_model.name):
715
+ self.text_model.build(None)
716
+
717
+
718
+ class TFCLIPVisionTransformer(keras.layers.Layer):
719
+ def __init__(self, config: CLIPVisionConfig, **kwargs):
720
+ super().__init__(**kwargs)
721
+
722
+ self.embeddings = TFCLIPVisionEmbeddings(config, name="embeddings")
723
+ self.pre_layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="pre_layrnorm")
724
+ self.encoder = TFCLIPEncoder(config, name="encoder")
725
+ self.post_layernorm = keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="post_layernorm")
726
+ self.embed_dim = config.hidden_size
727
+
728
+ def call(
729
+ self,
730
+ pixel_values: TFModelInputType,
731
+ output_attentions: bool,
732
+ output_hidden_states: bool,
733
+ return_dict: bool,
734
+ training: bool = False,
735
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
736
+ embedding_output = self.embeddings(pixel_values=pixel_values)
737
+ embedding_output = self.pre_layernorm(inputs=embedding_output)
738
+
739
+ encoder_outputs = self.encoder(
740
+ hidden_states=embedding_output,
741
+ attention_mask=None,
742
+ causal_attention_mask=None,
743
+ output_attentions=output_attentions,
744
+ output_hidden_states=output_hidden_states,
745
+ return_dict=return_dict,
746
+ training=training,
747
+ )
748
+
749
+ sequence_output = encoder_outputs[0]
750
+ pooled_output = sequence_output[:, 0, :]
751
+ pooled_output = self.post_layernorm(inputs=pooled_output)
752
+
753
+ if not return_dict:
754
+ return (sequence_output, pooled_output) + encoder_outputs[1:]
755
+
756
+ return TFBaseModelOutputWithPooling(
757
+ last_hidden_state=sequence_output,
758
+ pooler_output=pooled_output,
759
+ hidden_states=encoder_outputs.hidden_states,
760
+ attentions=encoder_outputs.attentions,
761
+ )
762
+
763
+ def build(self, input_shape=None):
764
+ if self.built:
765
+ return
766
+ self.built = True
767
+ if getattr(self, "embeddings", None) is not None:
768
+ with tf.name_scope(self.embeddings.name):
769
+ self.embeddings.build(None)
770
+ if getattr(self, "pre_layernorm", None) is not None:
771
+ with tf.name_scope(self.pre_layernorm.name):
772
+ self.pre_layernorm.build([None, None, self.embed_dim])
773
+ if getattr(self, "encoder", None) is not None:
774
+ with tf.name_scope(self.encoder.name):
775
+ self.encoder.build(None)
776
+ if getattr(self, "post_layernorm", None) is not None:
777
+ with tf.name_scope(self.post_layernorm.name):
778
+ self.post_layernorm.build([None, self.embed_dim])
779
+
780
+
781
+ @keras_serializable
782
+ class TFCLIPVisionMainLayer(keras.layers.Layer):
783
+ config_class = CLIPVisionConfig
784
+
785
+ def __init__(self, config: CLIPVisionConfig, **kwargs):
786
+ super().__init__(**kwargs)
787
+ self.config = config
788
+ self.vision_model = TFCLIPVisionTransformer(config, name="vision_model")
789
+
790
+ def get_input_embeddings(self) -> keras.layers.Layer:
791
+ return self.vision_model.embeddings
792
+
793
+ @unpack_inputs
794
+ def call(
795
+ self,
796
+ pixel_values: TFModelInputType | None = None,
797
+ output_attentions: Optional[bool] = None,
798
+ output_hidden_states: Optional[bool] = None,
799
+ return_dict: Optional[bool] = None,
800
+ training: bool = False,
801
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
802
+ if pixel_values is None:
803
+ raise ValueError("You have to specify pixel_values")
804
+
805
+ vision_model_outputs = self.vision_model(
806
+ pixel_values=pixel_values,
807
+ output_attentions=output_attentions,
808
+ output_hidden_states=output_hidden_states,
809
+ return_dict=return_dict,
810
+ training=training,
811
+ )
812
+
813
+ return vision_model_outputs
814
+
815
+ def build(self, input_shape=None):
816
+ if self.built:
817
+ return
818
+ self.built = True
819
+ if getattr(self, "vision_model", None) is not None:
820
+ with tf.name_scope(self.vision_model.name):
821
+ self.vision_model.build(None)
822
+
823
+
824
+ @keras_serializable
825
+ class TFCLIPMainLayer(keras.layers.Layer):
826
+ config_class = CLIPConfig
827
+
828
+ def __init__(self, config: CLIPConfig, **kwargs):
829
+ super().__init__(**kwargs)
830
+
831
+ if not isinstance(config.text_config, CLIPTextConfig):
832
+ raise ValueError(
833
+ "config.text_config is expected to be of type CLIPTextConfig but is of type"
834
+ f" {type(config.text_config)}."
835
+ )
836
+
837
+ if not isinstance(config.vision_config, CLIPVisionConfig):
838
+ raise ValueError(
839
+ "config.vision_config is expected to be of type CLIPVisionConfig but is of type"
840
+ f" {type(config.vision_config)}."
841
+ )
842
+
843
+ self.config = config
844
+
845
+ text_config = config.text_config
846
+ vision_config = config.vision_config
847
+
848
+ self.projection_dim = config.projection_dim
849
+
850
+ self.text_model = TFCLIPTextTransformer(text_config, name="text_model")
851
+ self.vision_model = TFCLIPVisionTransformer(vision_config, name="vision_model")
852
+
853
+ self.visual_projection = keras.layers.Dense(
854
+ units=self.projection_dim,
855
+ kernel_initializer=get_initializer(vision_config.hidden_size**-0.5 * self.config.initializer_factor),
856
+ use_bias=False,
857
+ name="visual_projection",
858
+ )
859
+
860
+ self.text_projection = keras.layers.Dense(
861
+ units=self.projection_dim,
862
+ kernel_initializer=get_initializer(text_config.hidden_size**-0.5 * self.config.initializer_factor),
863
+ use_bias=False,
864
+ name="text_projection",
865
+ )
866
+ self.text_embed_dim = text_config.hidden_size
867
+ self.vision_embed_dim = vision_config.hidden_size
868
+
869
+ def build(self, input_shape: tf.TensorShape = None):
870
+ self.logit_scale = self.add_weight(
871
+ shape=(1,),
872
+ initializer=keras.initializers.Constant(self.config.logit_scale_init_value),
873
+ trainable=True,
874
+ name="logit_scale",
875
+ )
876
+
877
+ if self.built:
878
+ return
879
+ self.built = True
880
+ if getattr(self, "text_model", None) is not None:
881
+ with tf.name_scope(self.text_model.name):
882
+ self.text_model.build(None)
883
+ if getattr(self, "vision_model", None) is not None:
884
+ with tf.name_scope(self.vision_model.name):
885
+ self.vision_model.build(None)
886
+ if getattr(self, "visual_projection", None) is not None:
887
+ with tf.name_scope(self.visual_projection.name):
888
+ self.visual_projection.build([None, None, self.vision_embed_dim])
889
+ if getattr(self, "text_projection", None) is not None:
890
+ with tf.name_scope(self.text_projection.name):
891
+ self.text_projection.build([None, None, self.text_embed_dim])
892
+
893
+ @unpack_inputs
894
+ def get_text_features(
895
+ self,
896
+ input_ids: TFModelInputType | None = None,
897
+ attention_mask: np.ndarray | tf.Tensor | None = None,
898
+ position_ids: np.ndarray | tf.Tensor | None = None,
899
+ output_attentions: Optional[bool] = None,
900
+ output_hidden_states: Optional[bool] = None,
901
+ return_dict: Optional[bool] = None,
902
+ training: bool = False,
903
+ ) -> tf.Tensor:
904
+ if input_ids is None:
905
+             raise ValueError("You have to specify input_ids")
906
+
907
+ input_shape = shape_list(input_ids)
908
+
909
+ if attention_mask is None:
910
+ attention_mask = tf.fill(dims=input_shape, value=1)
911
+
912
+ text_outputs = self.text_model(
913
+ input_ids=input_ids,
914
+ attention_mask=attention_mask,
915
+ position_ids=position_ids,
916
+ output_attentions=output_attentions,
917
+ output_hidden_states=output_hidden_states,
918
+ return_dict=return_dict,
919
+ training=training,
920
+ )
921
+
922
+ pooled_output = text_outputs[1]
923
+ text_features = self.text_projection(inputs=pooled_output)
924
+
925
+ return text_features
926
+
927
+ @unpack_inputs
928
+ def get_image_features(
929
+ self,
930
+ pixel_values: TFModelInputType | None = None,
931
+ output_attentions: Optional[bool] = None,
932
+ output_hidden_states: Optional[bool] = None,
933
+ return_dict: Optional[bool] = None,
934
+ training: bool = False,
935
+ ) -> tf.Tensor:
936
+ if pixel_values is None:
937
+ raise ValueError("You have to specify pixel_values")
938
+
939
+ vision_outputs = self.vision_model(
940
+ pixel_values=pixel_values,
941
+ output_attentions=output_attentions,
942
+ output_hidden_states=output_hidden_states,
943
+ return_dict=return_dict,
944
+ training=training,
945
+ )
946
+
947
+ pooled_output = vision_outputs[1] # pooled_output
948
+ image_features = self.visual_projection(inputs=pooled_output)
949
+
950
+ return image_features
951
+
952
+ @unpack_inputs
953
+ def call(
954
+ self,
955
+ input_ids: TFModelInputType | None = None,
956
+ pixel_values: TFModelInputType | None = None,
957
+ attention_mask: np.ndarray | tf.Tensor | None = None,
958
+ position_ids: np.ndarray | tf.Tensor | None = None,
959
+ return_loss: Optional[bool] = None,
960
+ output_attentions: Optional[bool] = None,
961
+ output_hidden_states: Optional[bool] = None,
962
+ return_dict: Optional[bool] = None,
963
+ training: bool = False,
964
+ ) -> Union[TFCLIPOutput, Tuple[tf.Tensor]]:
965
+ if input_ids is None:
966
+             raise ValueError("You have to specify input_ids")
967
+ if pixel_values is None:
968
+ raise ValueError("You have to specify pixel_values")
969
+
970
+ input_shape = shape_list(input_ids)
971
+
972
+ if attention_mask is None:
973
+ attention_mask = tf.fill(dims=input_shape, value=1)
974
+
975
+ vision_outputs = self.vision_model(
976
+ pixel_values=pixel_values,
977
+ output_attentions=output_attentions,
978
+ output_hidden_states=output_hidden_states,
979
+ return_dict=return_dict,
980
+ training=training,
981
+ )
982
+
983
+ text_outputs = self.text_model(
984
+ input_ids=input_ids,
985
+ attention_mask=attention_mask,
986
+ position_ids=position_ids,
987
+ output_attentions=output_attentions,
988
+ output_hidden_states=output_hidden_states,
989
+ return_dict=return_dict,
990
+ training=training,
991
+ )
992
+
993
+ image_embeds = vision_outputs[1]
994
+ image_embeds = self.visual_projection(inputs=image_embeds)
995
+
996
+ text_embeds = text_outputs[1]
997
+ text_embeds = self.text_projection(inputs=text_embeds)
998
+
999
+ # normalized features
1000
+ image_embeds = image_embeds / tf.norm(tensor=image_embeds, ord="euclidean", axis=-1, keepdims=True)
1001
+ text_embeds = text_embeds / tf.norm(tensor=text_embeds, ord="euclidean", axis=-1, keepdims=True)
1002
+
1003
+ # cosine similarity as logits
1004
+ logit_scale = tf.math.exp(self.logit_scale)
1005
+ logits_per_text = tf.matmul(text_embeds, image_embeds, transpose_b=True) * logit_scale
1006
+ logits_per_image = tf.transpose(logits_per_text)
1007
+
1008
+ loss = None
1009
+ if return_loss:
1010
+ loss = clip_loss(logits_per_text)
1011
+ loss = tf.reshape(loss, (1,))
1012
+
1013
+ if not return_dict:
1014
+ output = (logits_per_image, logits_per_text, text_embeds, image_embeds, text_outputs, vision_outputs)
1015
+ return (loss,) + output if loss is not None else output
1016
+
1017
+ return TFCLIPOutput(
1018
+ loss=loss,
1019
+ logits_per_image=logits_per_image,
1020
+ logits_per_text=logits_per_text,
1021
+ text_embeds=text_embeds,
1022
+ image_embeds=image_embeds,
1023
+ text_model_output=text_outputs,
1024
+ vision_model_output=vision_outputs,
1025
+ )
1026
+
1027
+
1028
+ class TFCLIPPreTrainedModel(TFPreTrainedModel):
1029
+ """
1030
+ An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
1031
+ models.
1032
+ """
1033
+
1034
+ config_class = CLIPConfig
1035
+ base_model_prefix = "clip"
1036
+ _keys_to_ignore_on_load_missing = [r"position_ids"]
1037
+ _keys_to_ignore_on_load_unexpected = [r"position_ids"]
1038
+
1039
+
1040
+ CLIP_START_DOCSTRING = r"""
1041
+
1042
+ This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
1043
+     library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads
1044
+ etc.)
1045
+
1046
+ This model is also a [keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
1047
+ as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
1048
+ behavior.
1049
+
1050
+ <Tip>
1051
+
1052
+ TensorFlow models and layers in `transformers` accept two formats as input:
1053
+
1054
+ - having all inputs as keyword arguments (like PyTorch models), or
1055
+ - having all inputs as a list, tuple or dict in the first positional argument.
1056
+
1057
+ The reason the second format is supported is that Keras methods prefer this format when passing inputs to models
1058
+ and layers. Because of this support, when using methods like `model.fit()` things should "just work" for you - just
1059
+ pass your inputs and labels in any format that `model.fit()` supports! If, however, you want to use the second
1060
+ format outside of Keras methods like `fit()` and `predict()`, such as when creating your own layers or models with
1061
+ the Keras `Functional` API, there are three possibilities you can use to gather all the input Tensors in the first
1062
+ positional argument:
1063
+
1064
+ - a single Tensor with `input_ids` only and nothing else: `model(input_ids)`
1065
+ - a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
1066
+ `model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
1067
+ - a dictionary with one or several input Tensors associated to the input names given in the docstring:
1068
+ `model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
1069
+
1070
+ Note that when creating models and layers with
1071
+ [subclassing](https://keras.io/guides/making_new_layers_and_models_via_subclassing/) then you don't need to worry
1072
+ about any of this, as you can just pass inputs like you would to any other Python function!
1073
+
1074
+ </Tip>
1075
+
1076
+ Args:
1077
+ config ([`CLIPConfig`]): Model configuration class with all the parameters of the model.
1078
+ Initializing with a config file does not load the weights associated with the model, only the
1079
+ configuration. Check out the [`~TFPreTrainedModel.from_pretrained`] method to load the model weights.
1080
+ """
1081
+
1082
+ CLIP_TEXT_INPUTS_DOCSTRING = r"""
1083
+ Args:
1084
+         input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `({0})`):
1085
+ Indices of input sequence tokens in the vocabulary.
1086
+
1087
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
1088
+ [`PreTrainedTokenizer.encode`] for details.
1089
+
1090
+ [What are input IDs?](../glossary#input-ids)
1091
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1092
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1093
+
1094
+ - 1 for tokens that are **not masked**,
1095
+ - 0 for tokens that are **masked**.
1096
+
1097
+ [What are attention masks?](../glossary#attention-mask)
1098
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1099
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1100
+ config.max_position_embeddings - 1]`.
1101
+
1102
+ [What are position IDs?](../glossary#position-ids)
1103
+ output_attentions (`bool`, *optional*):
1104
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1105
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1106
+ config will be used instead.
1107
+ output_hidden_states (`bool`, *optional*):
1108
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1109
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1110
+ used instead.
1111
+ return_dict (`bool`, *optional*):
1112
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1113
+ eager mode, in graph mode the value will always be set to True.
1114
+         training (`bool`, *optional*, defaults to `False`):
1115
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1116
+ behaviors between training and evaluation).
1117
+ """
1118
+
1119
+ CLIP_VISION_INPUTS_DOCSTRING = r"""
1120
+ Args:
1121
+         pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `(batch_size, num_channels, height, width)`):
+             Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
+             [`CLIPImageProcessor.__call__`] for details.
+         output_attentions (`bool`, *optional*):
+             Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
+             tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
+             config will be used instead.
1127
+ output_hidden_states (`bool`, *optional*):
1128
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1129
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1130
+ used instead.
1131
+ return_dict (`bool`, *optional*):
1132
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1133
+ eager mode, in graph mode the value will always be set to True.
1134
+         training (`bool`, *optional*, defaults to `False`):
1135
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1136
+ behaviors between training and evaluation).
1137
+ """
1138
+
1139
+ CLIP_INPUTS_DOCSTRING = r"""
1140
+ Args:
1141
+         input_ids (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `({0})`):
1142
+ Indices of input sequence tokens in the vocabulary.
1143
+
1144
+ Indices can be obtained using [`AutoTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
1145
+ [`PreTrainedTokenizer.encode`] for details.
1146
+
1147
+ [What are input IDs?](../glossary#input-ids)
1148
+         pixel_values (`np.ndarray`, `tf.Tensor`, `List[tf.Tensor]`, `Dict[str, tf.Tensor]` or `Dict[str, np.ndarray]`, and each example must have the shape `(batch_size, num_channels, height, width)`):
1149
+ Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See
1150
+ [`CLIPImageProcessor.__call__`] for details.
1151
+ attention_mask (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1152
+ Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
1153
+
1154
+ - 1 for tokens that are **not masked**,
1155
+ - 0 for tokens that are **masked**.
1156
+
1157
+ [What are attention masks?](../glossary#attention-mask)
1158
+ position_ids (`np.ndarray` or `tf.Tensor` of shape `({0})`, *optional*):
1159
+ Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
1160
+ config.max_position_embeddings - 1]`.
1161
+
1162
+ [What are position IDs?](../glossary#position-ids)
1163
+ return_loss (`bool`, *optional*):
1164
+ Whether or not to return the contrastive loss.
1165
+ output_attentions (`bool`, *optional*):
1166
+ Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
1167
+ tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
1168
+ config will be used instead.
1169
+ output_hidden_states (`bool`, *optional*):
1170
+ Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
1171
+ more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
1172
+ used instead.
1173
+ return_dict (`bool`, *optional*):
1174
+ Whether or not to return a [`~utils.ModelOutput`] instead of a plain tuple. This argument can be used in
1175
+ eager mode, in graph mode the value will always be set to True.
1176
+         training (`bool`, *optional*, defaults to `False`):
1177
+ Whether or not to use the model in training mode (some modules like dropout modules have different
1178
+ behaviors between training and evaluation).
1179
+ """
1180
+
1181
+
1182
+ class TFCLIPTextModel(TFCLIPPreTrainedModel):
1183
+ config_class = CLIPTextConfig
1184
+
1185
+ def __init__(self, config: CLIPTextConfig, *inputs, **kwargs):
1186
+ super().__init__(config, *inputs, **kwargs)
1187
+
1188
+ self.clip = TFCLIPTextMainLayer(config, name="clip")
1189
+
1190
+ @unpack_inputs
1191
+ @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1192
+ @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=CLIPTextConfig)
1193
+ def call(
1194
+ self,
1195
+ input_ids: TFModelInputType | None = None,
1196
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1197
+ position_ids: np.ndarray | tf.Tensor | None = None,
1198
+ output_attentions: Optional[bool] = None,
1199
+ output_hidden_states: Optional[bool] = None,
1200
+ return_dict: Optional[bool] = None,
1201
+ training: Optional[bool] = False,
1202
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
1203
+ r"""
1204
+ Returns:
1205
+
1206
+ Examples:
1207
+
1208
+ ```python
1209
+ >>> from transformers import AutoTokenizer, TFCLIPTextModel
1210
+
1211
+ >>> model = TFCLIPTextModel.from_pretrained("openai/clip-vit-base-patch32")
1212
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")
1213
+
1214
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf")
1215
+
1216
+ >>> outputs = model(**inputs)
1217
+ >>> last_hidden_state = outputs.last_hidden_state
1218
+ >>> pooled_output = outputs.pooler_output # pooled (EOS token) states
1219
+ ```"""
1220
+
1221
+ outputs = self.clip(
1222
+ input_ids=input_ids,
1223
+ attention_mask=attention_mask,
1224
+ position_ids=position_ids,
1225
+ output_attentions=output_attentions,
1226
+ output_hidden_states=output_hidden_states,
1227
+ return_dict=return_dict,
1228
+ training=training,
1229
+ )
1230
+
1231
+ return outputs
1232
+
1233
+ def build(self, input_shape=None):
1234
+ if self.built:
1235
+ return
1236
+ self.built = True
1237
+ if getattr(self, "clip", None) is not None:
1238
+ with tf.name_scope(self.clip.name):
1239
+ self.clip.build(None)
1240
+
1241
+
1242
+ class TFCLIPVisionModel(TFCLIPPreTrainedModel):
1243
+ config_class = CLIPVisionConfig
1244
+ main_input_name = "pixel_values"
1245
+
1246
+ def __init__(self, config: CLIPVisionConfig, *inputs, **kwargs):
1247
+ super().__init__(config, *inputs, **kwargs)
1248
+
1249
+ self.clip = TFCLIPVisionMainLayer(config, name="clip")
1250
+
1251
+ @unpack_inputs
1252
+ @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
1253
+ @replace_return_docstrings(output_type=TFBaseModelOutputWithPooling, config_class=CLIPVisionConfig)
1254
+ def call(
1255
+ self,
1256
+ pixel_values: TFModelInputType | None = None,
1257
+ output_attentions: Optional[bool] = None,
1258
+ output_hidden_states: Optional[bool] = None,
1259
+ return_dict: Optional[bool] = None,
1260
+ training: Optional[bool] = False,
1261
+ ) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
1262
+ r"""
1263
+ Returns:
1264
+
1265
+ Examples:
1266
+
1267
+ ```python
1268
+ >>> from PIL import Image
1269
+ >>> import requests
1270
+ >>> from transformers import AutoProcessor, TFCLIPVisionModel
1271
+
1272
+ >>> model = TFCLIPVisionModel.from_pretrained("openai/clip-vit-base-patch32")
1273
+ >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
1274
+
1275
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1276
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1277
+
1278
+ >>> inputs = processor(images=image, return_tensors="tf")
1279
+
1280
+ >>> outputs = model(**inputs)
1281
+ >>> last_hidden_state = outputs.last_hidden_state
1282
+ >>> pooled_output = outputs.pooler_output # pooled CLS states
1283
+ ```"""
1284
+
1285
+ outputs = self.clip(
1286
+ pixel_values=pixel_values,
1287
+ output_attentions=output_attentions,
1288
+ output_hidden_states=output_hidden_states,
1289
+ return_dict=return_dict,
1290
+ training=training,
1291
+ )
1292
+
1293
+ return outputs
1294
+
1295
+ def build(self, input_shape=None):
1296
+ if self.built:
1297
+ return
1298
+ self.built = True
1299
+ if getattr(self, "clip", None) is not None:
1300
+ with tf.name_scope(self.clip.name):
1301
+ self.clip.build(None)
1302
+
1303
+
1304
+ @add_start_docstrings(CLIP_START_DOCSTRING)
1305
+ class TFCLIPModel(TFCLIPPreTrainedModel):
1306
+ config_class = CLIPConfig
1307
+
1308
+ def __init__(self, config: CLIPConfig, *inputs, **kwargs):
1309
+ super().__init__(config, *inputs, **kwargs)
1310
+
1311
+ self.clip = TFCLIPMainLayer(config, name="clip")
1312
+
1313
+ @unpack_inputs
1314
+ @add_start_docstrings_to_model_forward(CLIP_TEXT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1315
+ def get_text_features(
1316
+ self,
1317
+ input_ids: TFModelInputType | None = None,
1318
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1319
+ position_ids: np.ndarray | tf.Tensor | None = None,
1320
+ output_attentions: Optional[bool] = None,
1321
+ output_hidden_states: Optional[bool] = None,
1322
+ return_dict: Optional[bool] = None,
1323
+ training: bool = False,
1324
+ ) -> tf.Tensor:
1325
+ r"""
1326
+ Returns:
1327
+             text_features (`tf.Tensor` of shape `(batch_size, output_dim)`): The text embeddings obtained by applying
1328
+ the projection layer to the pooled output of [`TFCLIPTextModel`].
1329
+
1330
+ Examples:
1331
+
1332
+ ```python
1333
+ >>> from transformers import AutoTokenizer, TFCLIPModel
1334
+
1335
+ >>> model = TFCLIPModel.from_pretrained("openai/clip-vit-base-patch32")
1336
+ >>> tokenizer = AutoTokenizer.from_pretrained("openai/clip-vit-base-patch32")
1337
+
1338
+ >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding=True, return_tensors="tf")
1339
+ >>> text_features = model.get_text_features(**inputs)
1340
+ ```"""
1341
+
1342
+ text_features = self.clip.get_text_features(
1343
+ input_ids=input_ids,
1344
+ attention_mask=attention_mask,
1345
+ position_ids=position_ids,
1346
+ output_attentions=output_attentions,
1347
+ output_hidden_states=output_hidden_states,
1348
+ return_dict=return_dict,
1349
+ )
1350
+
1351
+ return text_features
1352
+
1353
+ @unpack_inputs
1354
+ @add_start_docstrings_to_model_forward(CLIP_VISION_INPUTS_DOCSTRING)
1355
+ def get_image_features(
1356
+ self,
1357
+ pixel_values: TFModelInputType | None = None,
1358
+ output_attentions: Optional[bool] = None,
1359
+ output_hidden_states: Optional[bool] = None,
1360
+ return_dict: Optional[bool] = None,
1361
+ training: bool = False,
1362
+ ) -> tf.Tensor:
1363
+ r"""
1364
+ Returns:
1365
+             image_features (`tf.Tensor` of shape `(batch_size, output_dim)`): The image embeddings obtained by applying
1366
+ the projection layer to the pooled output of [`TFCLIPVisionModel`].
1367
+
1368
+ Examples:
1369
+
1370
+ ```python
1371
+ >>> from PIL import Image
1372
+ >>> import requests
1373
+ >>> from transformers import AutoProcessor, TFCLIPModel
1374
+
1375
+ >>> model = TFCLIPModel.from_pretrained("openai/clip-vit-base-patch32")
1376
+ >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
1377
+
1378
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1379
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1380
+
1381
+ >>> inputs = processor(images=image, return_tensors="tf")
1382
+
1383
+ >>> image_features = model.get_image_features(**inputs)
1384
+ ```"""
1385
+
1386
+ image_features = self.clip.get_image_features(
1387
+ pixel_values=pixel_values,
1388
+ output_attentions=output_attentions,
1389
+ output_hidden_states=output_hidden_states,
1390
+ return_dict=return_dict,
1391
+ )
1392
+
1393
+ return image_features
1394
+
1395
+ @unpack_inputs
1396
+ @add_start_docstrings_to_model_forward(CLIP_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
1397
+ @replace_return_docstrings(output_type=TFCLIPOutput, config_class=CLIPConfig)
1398
+ def call(
1399
+ self,
1400
+ input_ids: TFModelInputType | None = None,
1401
+ pixel_values: TFModelInputType | None = None,
1402
+ attention_mask: np.ndarray | tf.Tensor | None = None,
1403
+ position_ids: np.ndarray | tf.Tensor | None = None,
1404
+ return_loss: Optional[bool] = None,
1405
+ output_attentions: Optional[bool] = None,
1406
+ output_hidden_states: Optional[bool] = None,
1407
+ return_dict: Optional[bool] = None,
1408
+ training: bool = False,
1409
+ ) -> Union[TFCLIPOutput, Tuple[tf.Tensor]]:
1410
+ r"""
1411
+ Returns:
1412
+
1413
+ Examples:
1414
+
1415
+ ```python
1416
+ >>> import tensorflow as tf
1417
+ >>> from PIL import Image
1418
+ >>> import requests
1419
+ >>> from transformers import AutoProcessor, TFCLIPModel
1420
+
1421
+ >>> model = TFCLIPModel.from_pretrained("openai/clip-vit-base-patch32")
1422
+ >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")
1423
+
1424
+ >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
1425
+ >>> image = Image.open(requests.get(url, stream=True).raw)
1426
+
1427
+ >>> inputs = processor(
1428
+ ... text=["a photo of a cat", "a photo of a dog"], images=image, return_tensors="tf", padding=True
1429
+ ... )
1430
+
1431
+ >>> outputs = model(**inputs)
1432
+ >>> logits_per_image = outputs.logits_per_image # this is the image-text similarity score
1433
+ >>> probs = tf.nn.softmax(logits_per_image, axis=1) # we can take the softmax to get the label probabilities
1434
+ ```"""
1435
+
1436
+ outputs = self.clip(
1437
+ input_ids=input_ids,
1438
+ pixel_values=pixel_values,
1439
+ attention_mask=attention_mask,
1440
+ position_ids=position_ids,
1441
+ return_loss=return_loss,
1442
+ output_attentions=output_attentions,
1443
+ output_hidden_states=output_hidden_states,
1444
+ return_dict=return_dict,
1445
+ )
1446
+
1447
+ return outputs
1448
+
1449
+ def serving_output(self, output: TFCLIPOutput) -> TFCLIPOutput:
1450
+ # TODO: As is this currently fails with saved_model=True, because
1451
+ # TensorFlow cannot trace through nested dataclasses. Reference:
1452
+ # https://github.com/huggingface/transformers/pull/16886
1453
+ return output
1454
+
1455
+ def build(self, input_shape=None):
1456
+ if self.built:
1457
+ return
1458
+ self.built = True
1459
+ if getattr(self, "clip", None) is not None:
1460
+ with tf.name_scope(self.clip.name):
1461
+ self.clip.build(None)
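The forward pass above ends with `logits_per_text` / `logits_per_image` and, when `return_loss` is set, `clip_loss(logits_per_text)`. A rough standalone sketch of that contrastive objective, assuming the standard CLIP formulation (symmetric cross-entropy over the similarity matrix, with matching text/image pairs on the diagonal); it is an illustration, not necessarily the exact helper defined earlier in this file:

```python
import tensorflow as tf

def sketch_clip_loss(logits_per_text: tf.Tensor) -> tf.Tensor:
    # The i-th text matches the i-th image, so the targets are the diagonal indices.
    labels = tf.range(tf.shape(logits_per_text)[0])
    caption_loss = tf.reduce_mean(
        tf.keras.losses.sparse_categorical_crossentropy(labels, logits_per_text, from_logits=True)
    )
    image_loss = tf.reduce_mean(
        tf.keras.losses.sparse_categorical_crossentropy(labels, tf.transpose(logits_per_text), from_logits=True)
    )
    return (caption_loss + image_loss) / 2.0
```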
venv/lib/python3.10/site-packages/transformers/models/clip/processing_clip.py ADDED
@@ -0,0 +1,153 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """
16
+ Image/Text processor class for CLIP
17
+ """
18
+
19
+ import warnings
20
+
21
+ from ...processing_utils import ProcessorMixin
22
+ from ...tokenization_utils_base import BatchEncoding
23
+
24
+
25
+ class CLIPProcessor(ProcessorMixin):
26
+ r"""
27
+ Constructs a CLIP processor which wraps a CLIP image processor and a CLIP tokenizer into a single processor.
28
+
29
+ [`CLIPProcessor`] offers all the functionalities of [`CLIPImageProcessor`] and [`CLIPTokenizerFast`]. See the
30
+ [`~CLIPProcessor.__call__`] and [`~CLIPProcessor.decode`] for more information.
31
+
32
+ Args:
33
+ image_processor ([`CLIPImageProcessor`], *optional*):
34
+ The image processor is a required input.
35
+ tokenizer ([`CLIPTokenizerFast`], *optional*):
36
+ The tokenizer is a required input.
37
+ """
38
+
39
+ attributes = ["image_processor", "tokenizer"]
40
+ image_processor_class = "CLIPImageProcessor"
41
+ tokenizer_class = ("CLIPTokenizer", "CLIPTokenizerFast")
42
+
43
+ def __init__(self, image_processor=None, tokenizer=None, **kwargs):
44
+ feature_extractor = None
45
+ if "feature_extractor" in kwargs:
46
+ warnings.warn(
47
+ "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
48
+ " instead.",
49
+ FutureWarning,
50
+ )
51
+ feature_extractor = kwargs.pop("feature_extractor")
52
+
53
+ image_processor = image_processor if image_processor is not None else feature_extractor
54
+ if image_processor is None:
55
+ raise ValueError("You need to specify an `image_processor`.")
56
+ if tokenizer is None:
57
+ raise ValueError("You need to specify a `tokenizer`.")
58
+
59
+ super().__init__(image_processor, tokenizer)
60
+
61
+ def __call__(self, text=None, images=None, return_tensors=None, **kwargs):
62
+ """
63
+         Main method to prepare for the model one or several sequence(s) and image(s). This method forwards the `text`
64
+ and `kwargs` arguments to CLIPTokenizerFast's [`~CLIPTokenizerFast.__call__`] if `text` is not `None` to encode
65
+         the text. To prepare the image(s), this method forwards the `images` and `kwargs` arguments to
66
+         CLIPImageProcessor's [`~CLIPImageProcessor.__call__`] if `images` is not `None`. Please refer to the docstring
67
+ of the above two methods for more information.
68
+
69
+ Args:
70
+ text (`str`, `List[str]`, `List[List[str]]`):
71
+ The sequence or batch of sequences to be encoded. Each sequence can be a string or a list of strings
72
+ (pretokenized string). If the sequences are provided as list of strings (pretokenized), you must set
73
+ `is_split_into_words=True` (to lift the ambiguity with a batch of sequences).
74
+ images (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`, `List[torch.Tensor]`):
75
+ The image or batch of images to be prepared. Each image can be a PIL image, NumPy array or PyTorch
76
+ tensor. Both channels-first and channels-last formats are supported.
77
+
78
+ return_tensors (`str` or [`~utils.TensorType`], *optional*):
79
+ If set, will return tensors of a particular framework. Acceptable values are:
80
+
81
+ - `'tf'`: Return TensorFlow `tf.constant` objects.
82
+ - `'pt'`: Return PyTorch `torch.Tensor` objects.
83
+ - `'np'`: Return NumPy `np.ndarray` objects.
84
+ - `'jax'`: Return JAX `jnp.ndarray` objects.
85
+
86
+ Returns:
87
+ [`BatchEncoding`]: A [`BatchEncoding`] with the following fields:
88
+
89
+ - **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
90
+ - **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
91
+ `return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
92
+ `None`).
93
+ - **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
94
+ """
95
+ tokenizer_kwargs, image_processor_kwargs = {}, {}
96
+ if kwargs:
97
+ tokenizer_kwargs = {k: v for k, v in kwargs.items() if k not in self.image_processor._valid_processor_keys}
98
+ image_processor_kwargs = {
99
+ k: v for k, v in kwargs.items() if k in self.image_processor._valid_processor_keys
100
+ }
101
+
102
+ if text is None and images is None:
103
+ raise ValueError("You have to specify either text or images. Both cannot be none.")
104
+
105
+ if text is not None:
106
+ encoding = self.tokenizer(text, return_tensors=return_tensors, **tokenizer_kwargs)
107
+
108
+ if images is not None:
109
+ image_features = self.image_processor(images, return_tensors=return_tensors, **image_processor_kwargs)
110
+
111
+ if text is not None and images is not None:
112
+ encoding["pixel_values"] = image_features.pixel_values
113
+ return encoding
114
+ elif text is not None:
115
+ return encoding
116
+ else:
117
+ return BatchEncoding(data=dict(**image_features), tensor_type=return_tensors)
118
+
119
+ def batch_decode(self, *args, **kwargs):
120
+ """
121
+ This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.batch_decode`]. Please
122
+ refer to the docstring of this method for more information.
123
+ """
124
+ return self.tokenizer.batch_decode(*args, **kwargs)
125
+
126
+ def decode(self, *args, **kwargs):
127
+ """
128
+ This method forwards all its arguments to CLIPTokenizerFast's [`~PreTrainedTokenizer.decode`]. Please refer to
129
+ the docstring of this method for more information.
130
+ """
131
+ return self.tokenizer.decode(*args, **kwargs)
132
+
133
+ @property
134
+ def model_input_names(self):
135
+ tokenizer_input_names = self.tokenizer.model_input_names
136
+ image_processor_input_names = self.image_processor.model_input_names
137
+ return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names))
138
+
139
+ @property
140
+ def feature_extractor_class(self):
141
+ warnings.warn(
142
+ "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
143
+ FutureWarning,
144
+ )
145
+ return self.image_processor_class
146
+
147
+ @property
148
+ def feature_extractor(self):
149
+ warnings.warn(
150
+ "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
151
+ FutureWarning,
152
+ )
153
+ return self.image_processor
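A hedged usage sketch of the processor defined above: text is routed to the tokenizer, images to the image processor, and the two outputs are merged into a single `BatchEncoding`. The checkpoint id matches the examples used elsewhere in these files; the image path is a placeholder:

```python
from PIL import Image
from transformers import CLIPProcessor

processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
image = Image.open("cat.png")  # placeholder path

batch = processor(text=["a photo of a cat"], images=image, return_tensors="np", padding=True)
print(sorted(batch.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']
```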
venv/lib/python3.10/site-packages/transformers/models/clip/tokenization_clip.py ADDED
@@ -0,0 +1,516 @@
1
+ # coding=utf-8
2
+ # Copyright 2021 The Open AI Team Authors and The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Tokenization classes for CLIP."""
16
+
17
+ import json
18
+ import os
19
+ import unicodedata
20
+ from functools import lru_cache
21
+ from typing import List, Optional, Tuple
22
+
23
+ import regex as re
24
+
25
+ from ...tokenization_utils import AddedToken, PreTrainedTokenizer, _is_control, _is_punctuation, _is_whitespace
26
+ from ...utils import logging
27
+
28
+
29
+ logger = logging.get_logger(__name__)
30
+
31
+ VOCAB_FILES_NAMES = {
32
+ "vocab_file": "vocab.json",
33
+ "merges_file": "merges.txt",
34
+ }
35
+
36
+
37
+ @lru_cache()
38
+ def bytes_to_unicode():
39
+ """
40
+ Returns list of utf-8 byte and a mapping to unicode strings. We specifically avoids mapping to whitespace/control
41
+ characters the bpe code barfs on.
42
+
43
+ The reversible bpe codes work on unicode strings. This means you need a large # of unicode characters in your vocab
44
+ if you want to avoid UNKs. When you're at something like a 10B token dataset you end up needing around 5K for
45
+ decent coverage. This is a significant percentage of your normal, say, 32K bpe vocab. To avoid that, we want lookup
46
+ tables between utf-8 bytes and unicode strings.
47
+ """
48
+ bs = (
49
+ list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
50
+ )
51
+ cs = bs[:]
52
+ n = 0
53
+ for b in range(2**8):
54
+ if b not in bs:
55
+ bs.append(b)
56
+ cs.append(2**8 + n)
57
+ n += 1
58
+ cs = [chr(n) for n in cs]
59
+ return dict(zip(bs, cs))
60
+
61
+
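A quick illustration of the mapping returned by `bytes_to_unicode`: printable bytes map to themselves, while whitespace and control bytes are shifted into otherwise unused codepoints so the BPE merges never operate on raw whitespace:

```python
from transformers.models.clip.tokenization_clip import bytes_to_unicode

mapping = bytes_to_unicode()
print(mapping[ord("a")])  # 'a' -- printable bytes are kept as-is
print(mapping[ord(" ")])  # 'Ġ' -- the space byte is remapped to an unused codepoint
```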
62
+ def get_pairs(word):
63
+ """
64
+ Return set of symbol pairs in a word.
65
+
66
+ Word is represented as tuple of symbols (symbols being variable-length strings).
67
+ """
68
+ pairs = set()
69
+ prev_char = word[0]
70
+ for char in word[1:]:
71
+ pairs.add((prev_char, char))
72
+ prev_char = char
73
+ return pairs
74
+
75
+
76
+ def whitespace_clean(text):
77
+ text = re.sub(r"\s+", " ", text)
78
+ text = text.strip()
79
+ return text
80
+
81
+
82
+ # Copied from transformers.models.bert.tokenization_bert.whitespace_tokenize
83
+ def whitespace_tokenize(text):
84
+ """Runs basic whitespace cleaning and splitting on a piece of text."""
85
+ text = text.strip()
86
+ if not text:
87
+ return []
88
+ tokens = text.split()
89
+ return tokens
90
+
91
+
92
+ # Copied from transformers.models.bert.tokenization_bert.BasicTokenizer
93
+ class BasicTokenizer(object):
94
+ """
95
+ Constructs a BasicTokenizer that will run basic tokenization (punctuation splitting, lower casing, etc.).
96
+
97
+ Args:
98
+ do_lower_case (`bool`, *optional*, defaults to `True`):
99
+ Whether or not to lowercase the input when tokenizing.
100
+ never_split (`Iterable`, *optional*):
101
+ Collection of tokens which will never be split during tokenization. Only has an effect when
102
+ `do_basic_tokenize=True`
103
+ tokenize_chinese_chars (`bool`, *optional*, defaults to `True`):
104
+ Whether or not to tokenize Chinese characters.
105
+
106
+ This should likely be deactivated for Japanese (see this
107
+ [issue](https://github.com/huggingface/transformers/issues/328)).
108
+ strip_accents (`bool`, *optional*):
109
+ Whether or not to strip all accents. If this option is not specified, then it will be determined by the
110
+ value for `lowercase` (as in the original BERT).
111
+ do_split_on_punc (`bool`, *optional*, defaults to `True`):
112
+ In some instances we want to skip the basic punctuation splitting so that later tokenization can capture
113
+ the full context of the words, such as contractions.
114
+ """
115
+
116
+ def __init__(
117
+ self,
118
+ do_lower_case=True,
119
+ never_split=None,
120
+ tokenize_chinese_chars=True,
121
+ strip_accents=None,
122
+ do_split_on_punc=True,
123
+ ):
124
+ if never_split is None:
125
+ never_split = []
126
+ self.do_lower_case = do_lower_case
127
+ self.never_split = set(never_split)
128
+ self.tokenize_chinese_chars = tokenize_chinese_chars
129
+ self.strip_accents = strip_accents
130
+ self.do_split_on_punc = do_split_on_punc
131
+
132
+ def tokenize(self, text, never_split=None):
133
+ """
134
+ Basic Tokenization of a piece of text. For sub-word tokenization, see WordPieceTokenizer.
135
+
136
+ Args:
137
+ never_split (`List[str]`, *optional*)
138
+ Kept for backward compatibility purposes. Now implemented directly at the base class level (see
139
+ [`PreTrainedTokenizer.tokenize`]) List of token not to split.
140
+ """
141
+ # union() returns a new set by concatenating the two sets.
142
+ never_split = self.never_split.union(set(never_split)) if never_split else self.never_split
143
+ text = self._clean_text(text)
144
+
145
+ # This was added on November 1st, 2018 for the multilingual and Chinese
146
+ # models. This is also applied to the English models now, but it doesn't
147
+ # matter since the English models were not trained on any Chinese data
148
+ # and generally don't have any Chinese data in them (there are Chinese
149
+ # characters in the vocabulary because Wikipedia does have some Chinese
150
+ # words in the English Wikipedia.).
151
+ if self.tokenize_chinese_chars:
152
+ text = self._tokenize_chinese_chars(text)
153
+ # prevents treating the same character with different unicode codepoints as different characters
154
+ unicode_normalized_text = unicodedata.normalize("NFC", text)
155
+ orig_tokens = whitespace_tokenize(unicode_normalized_text)
156
+ split_tokens = []
157
+ for token in orig_tokens:
158
+ if token not in never_split:
159
+ if self.do_lower_case:
160
+ token = token.lower()
161
+ if self.strip_accents is not False:
162
+ token = self._run_strip_accents(token)
163
+ elif self.strip_accents:
164
+ token = self._run_strip_accents(token)
165
+ split_tokens.extend(self._run_split_on_punc(token, never_split))
166
+
167
+ output_tokens = whitespace_tokenize(" ".join(split_tokens))
168
+ return output_tokens
169
+
170
+ def _run_strip_accents(self, text):
171
+ """Strips accents from a piece of text."""
172
+ text = unicodedata.normalize("NFD", text)
173
+ output = []
174
+ for char in text:
175
+ cat = unicodedata.category(char)
176
+ if cat == "Mn":
177
+ continue
178
+ output.append(char)
179
+ return "".join(output)
180
+
181
+ def _run_split_on_punc(self, text, never_split=None):
182
+ """Splits punctuation on a piece of text."""
183
+ if not self.do_split_on_punc or (never_split is not None and text in never_split):
184
+ return [text]
185
+ chars = list(text)
186
+ i = 0
187
+ start_new_word = True
188
+ output = []
189
+ while i < len(chars):
190
+ char = chars[i]
191
+ if _is_punctuation(char):
192
+ output.append([char])
193
+ start_new_word = True
194
+ else:
195
+ if start_new_word:
196
+ output.append([])
197
+ start_new_word = False
198
+ output[-1].append(char)
199
+ i += 1
200
+
201
+ return ["".join(x) for x in output]
202
+
203
+ def _tokenize_chinese_chars(self, text):
204
+ """Adds whitespace around any CJK character."""
205
+ output = []
206
+ for char in text:
207
+ cp = ord(char)
208
+ if self._is_chinese_char(cp):
209
+ output.append(" ")
210
+ output.append(char)
211
+ output.append(" ")
212
+ else:
213
+ output.append(char)
214
+ return "".join(output)
215
+
216
+ def _is_chinese_char(self, cp):
217
+ """Checks whether CP is the codepoint of a CJK character."""
218
+ # This defines a "chinese character" as anything in the CJK Unicode block:
219
+ # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
220
+ #
221
+ # Note that the CJK Unicode block is NOT all Japanese and Korean characters,
222
+ # despite its name. The modern Korean Hangul alphabet is a different block,
223
+ # as is Japanese Hiragana and Katakana. Those alphabets are used to write
224
+ # space-separated words, so they are not treated specially and handled
225
+ # like the all of the other languages.
226
+ if (
227
+ (cp >= 0x4E00 and cp <= 0x9FFF)
228
+ or (cp >= 0x3400 and cp <= 0x4DBF) #
229
+ or (cp >= 0x20000 and cp <= 0x2A6DF) #
230
+ or (cp >= 0x2A700 and cp <= 0x2B73F) #
231
+ or (cp >= 0x2B740 and cp <= 0x2B81F) #
232
+ or (cp >= 0x2B820 and cp <= 0x2CEAF) #
233
+ or (cp >= 0xF900 and cp <= 0xFAFF)
234
+ or (cp >= 0x2F800 and cp <= 0x2FA1F) #
235
+ ): #
236
+ return True
237
+
238
+ return False
239
+
240
+ def _clean_text(self, text):
241
+ """Performs invalid character removal and whitespace cleanup on text."""
242
+ output = []
243
+ for char in text:
244
+ cp = ord(char)
245
+ if cp == 0 or cp == 0xFFFD or _is_control(char):
246
+ continue
247
+ if _is_whitespace(char):
248
+ output.append(" ")
249
+ else:
250
+ output.append(char)
251
+ return "".join(output)
252
+
253
+
254
+ class CLIPTokenizer(PreTrainedTokenizer):
255
+ """
256
+ Construct a CLIP tokenizer. Based on byte-level Byte-Pair-Encoding.
257
+
258
+ This tokenizer inherits from [`PreTrainedTokenizer`] which contains most of the main methods. Users should refer to
259
+ this superclass for more information regarding those methods.
260
+
261
+ Args:
262
+ vocab_file (`str`):
263
+ Path to the vocabulary file.
264
+ merges_file (`str`):
265
+ Path to the merges file.
266
+ errors (`str`, *optional*, defaults to `"replace"`):
267
+ Paradigm to follow when decoding bytes to UTF-8. See
268
+ [bytes.decode](https://docs.python.org/3/library/stdtypes.html#bytes.decode) for more information.
269
+ unk_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
270
+ The unknown token. A token that is not in the vocabulary cannot be converted to an ID and is set to be this
271
+ token instead.
272
+ bos_token (`str`, *optional*, defaults to `"<|startoftext|>"`):
273
+ The beginning of sequence token.
274
+ eos_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
275
+ The end of sequence token.
276
+ pad_token (`str`, *optional*, defaults to `"<|endoftext|>"`):
277
+ The token used for padding, for example when batching sequences of different lengths.
278
+ """
279
+
280
+ vocab_files_names = VOCAB_FILES_NAMES
281
+ model_input_names = ["input_ids", "attention_mask"]
282
+
283
+ def __init__(
284
+ self,
285
+ vocab_file,
286
+ merges_file,
287
+ errors="replace",
288
+ unk_token="<|endoftext|>",
289
+ bos_token="<|startoftext|>",
290
+ eos_token="<|endoftext|>",
291
+ pad_token="<|endoftext|>", # hack to enable padding
292
+ **kwargs,
293
+ ):
294
+ bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
295
+ eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
296
+ unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
297
+ try:
298
+ import ftfy
299
+
300
+ self.fix_text = ftfy.fix_text
301
+ except ImportError:
302
+ logger.info("ftfy or spacy is not installed using custom BasicTokenizer instead of ftfy.")
303
+ self.nlp = BasicTokenizer(strip_accents=False, do_split_on_punc=False)
304
+ self.fix_text = None
305
+
306
+ with open(vocab_file, encoding="utf-8") as vocab_handle:
307
+ self.encoder = json.load(vocab_handle)
308
+ self.decoder = {v: k for k, v in self.encoder.items()}
309
+ self.errors = errors # how to handle errors in decoding
310
+ self.byte_encoder = bytes_to_unicode()
311
+ self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
312
+ with open(merges_file, encoding="utf-8") as merges_handle:
313
+ bpe_merges = merges_handle.read().strip().split("\n")[1 : 49152 - 256 - 2 + 1]
314
+ bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
315
+ self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
316
+ self.cache = {"<|startoftext|>": "<|startoftext|>", "<|endoftext|>": "<|endoftext|>"}
317
+
318
+ self.pat = re.compile(
319
+ r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""",
320
+ re.IGNORECASE,
321
+ )
322
+
323
+ super().__init__(
324
+ errors=errors,
325
+ unk_token=unk_token,
326
+ bos_token=bos_token,
327
+ eos_token=eos_token,
328
+ pad_token=pad_token,
329
+ **kwargs,
330
+ )
331
+
332
+ @property
333
+ def vocab_size(self):
334
+ return len(self.encoder)
335
+
336
+ def get_vocab(self):
337
+ return dict(self.encoder, **self.added_tokens_encoder)
338
+
339
+ def build_inputs_with_special_tokens(
340
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
341
+ ) -> List[int]:
342
+ """
343
+ Build model inputs from a sequence or a pair of sequence for sequence classification tasks by concatenating and
344
+ adding special tokens. A CLIP sequence has the following format:
345
+
346
+ - single sequence: `<|startoftext|> X <|endoftext|>`
347
+
348
+ Pairs of sequences are not the expected use case, but they will be handled without a separator.
349
+
350
+ Args:
351
+ token_ids_0 (`List[int]`):
352
+ List of IDs to which the special tokens will be added.
353
+ token_ids_1 (`List[int]`, *optional*):
354
+ Optional second list of IDs for sequence pairs.
355
+
356
+ Returns:
357
+ `List[int]`: List of [input IDs](../glossary#input-ids) with the appropriate special tokens.
358
+ """
359
+ bos_token = [self.bos_token_id]
360
+ eos_token = [self.eos_token_id]
361
+
362
+ if token_ids_1 is None:
363
+ return bos_token + token_ids_0 + eos_token
364
+ return bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token
365
+
366
+ def get_special_tokens_mask(
367
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
368
+ ) -> List[int]:
369
+ """
370
+ Retrieve sequence ids from a token list that has no special tokens added. This method is called when adding
371
+ special tokens using the tokenizer `prepare_for_model` method.
372
+
373
+ Args:
374
+ token_ids_0 (`List[int]`):
375
+ List of IDs.
376
+ token_ids_1 (`List[int]`, *optional*):
377
+ Optional second list of IDs for sequence pairs.
378
+ already_has_special_tokens (`bool`, *optional*, defaults to `False`):
379
+ Whether or not the token list is already formatted with special tokens for the model.
380
+
381
+ Returns:
382
+ `List[int]`: A list of integers in the range [0, 1]: 1 for a special token, 0 for a sequence token.
383
+ """
384
+
385
+ if already_has_special_tokens:
386
+ return super().get_special_tokens_mask(
387
+ token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
388
+ )
389
+
390
+ if token_ids_1 is None:
391
+ return [1] + ([0] * len(token_ids_0)) + [1]
392
+ return [1] + ([0] * len(token_ids_0)) + [1] + [1] + ([0] * len(token_ids_1)) + [1]
393
+
394
+ def create_token_type_ids_from_sequences(
395
+ self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
396
+ ) -> List[int]:
397
+ """
398
+ Create a mask from the two sequences passed. CLIP does not make use of token type ids, therefore a list of
399
+ zeros is returned.
400
+
401
+ Args:
402
+ token_ids_0 (`List[int]`):
403
+ List of IDs.
404
+ token_ids_1 (`List[int]`, *optional*):
405
+ Optional second list of IDs for sequence pairs.
406
+
407
+ Returns:
408
+ `List[int]`: List of zeros.
409
+ """
410
+ bos_token = [self.bos_token_id]
411
+ eos_token = [self.eos_token_id]
412
+
413
+ if token_ids_1 is None:
414
+ return len(bos_token + token_ids_0 + eos_token) * [0]
415
+ return len(bos_token + token_ids_0 + eos_token + eos_token + token_ids_1 + eos_token) * [0]
416
+
417
+ def bpe(self, token):
418
+ if token in self.cache:
419
+ return self.cache[token]
420
+ word = tuple(token[:-1]) + (token[-1] + "</w>",)
421
+ pairs = get_pairs(word)
422
+
423
+ if not pairs:
424
+ return token + "</w>"
425
+
426
+ while True:
427
+ bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
428
+ if bigram not in self.bpe_ranks:
429
+ break
430
+ first, second = bigram
431
+ new_word = []
432
+ i = 0
433
+ while i < len(word):
434
+ try:
435
+ j = word.index(first, i)
436
+ except ValueError:
437
+ new_word.extend(word[i:])
438
+ break
439
+ else:
440
+ new_word.extend(word[i:j])
441
+ i = j
442
+
443
+ if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
444
+ new_word.append(first + second)
445
+ i += 2
446
+ else:
447
+ new_word.append(word[i])
448
+ i += 1
449
+ new_word = tuple(new_word)
450
+ word = new_word
451
+ if len(word) == 1:
452
+ break
453
+ else:
454
+ pairs = get_pairs(word)
455
+ word = " ".join(word)
456
+ self.cache[token] = word
457
+ return word
458
+
459
+ def _tokenize(self, text):
460
+ """Tokenize a string."""
461
+ bpe_tokens = []
462
+ if self.fix_text is None:
463
+ text = " ".join(self.nlp.tokenize(text))
464
+ else:
465
+ text = whitespace_clean(self.fix_text(text)).lower()
466
+
467
+ for token in re.findall(self.pat, text):
468
+ token = "".join(
469
+ self.byte_encoder[b] for b in token.encode("utf-8")
470
+ ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
471
+ bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
472
+ return bpe_tokens
473
+
474
+ def _convert_token_to_id(self, token):
475
+ """Converts a token (str) in an id using the vocab."""
476
+ return self.encoder.get(token, self.encoder.get(self.unk_token))
477
+
478
+ def _convert_id_to_token(self, index):
479
+ """Converts an index (integer) in a token (str) using the vocab."""
480
+ return self.decoder.get(index)
481
+
482
+ def convert_tokens_to_string(self, tokens):
483
+ """Converts a sequence of tokens (string) in a single string."""
484
+ text = "".join(tokens)
485
+ byte_array = bytearray([self.byte_decoder[c] for c in text])
486
+ text = byte_array.decode("utf-8", errors=self.errors).replace("</w>", " ").strip()
487
+ return text
488
+
489
+ def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
490
+ if not os.path.isdir(save_directory):
491
+ logger.error("Vocabulary path ({}) should be a directory".format(save_directory))
492
+ return
493
+ vocab_file = os.path.join(
494
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
495
+ )
496
+ merge_file = os.path.join(
497
+ save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
498
+ )
499
+
500
+ with open(vocab_file, "w", encoding="utf-8") as f:
501
+ f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
502
+
503
+ index = 0
504
+ with open(merge_file, "w", encoding="utf-8") as writer:
505
+ writer.write("#version: 0.2\n")
506
+ for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
507
+ if index != token_index:
508
+ logger.warning(
509
+ "Saving vocabulary to {}: BPE merge indices are not consecutive."
510
+ " Please check that the tokenizer is not corrupted!".format(merge_file)
511
+ )
512
+ index = token_index
513
+ writer.write(" ".join(bpe_tokens) + "\n")
514
+ index += 1
515
+
516
+ return vocab_file, merge_file
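A hedged usage sketch of the tokenizer defined above: a single sequence is wrapped as `<|startoftext|> X <|endoftext|>`, and word-final BPE pieces carry the `</w>` marker. The exact sub-tokens depend on the checkpoint's vocabulary:

```python
from transformers import CLIPTokenizer

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-base-patch32")

enc = tokenizer("a photo of a cat")
assert enc["input_ids"][0] == tokenizer.bos_token_id   # <|startoftext|>
assert enc["input_ids"][-1] == tokenizer.eos_token_id  # <|endoftext|>

print(tokenizer.tokenize("a photo of a cat"))  # e.g. ['a</w>', 'photo</w>', 'of</w>', 'a</w>', 'cat</w>']
```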