Add files using upload-large-folder tool
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete list.
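The commit title above refers to huggingface_hub's large-folder uploader, which chunks and resumes uploads and stores LFS-matched files as LFS objects. A minimal sketch of the call that likely produced a commit like this (the repo id and local path are placeholders):

    from huggingface_hub import HfApi

    api = HfApi()
    # Resumable, multi-part upload of a large checkpoint/eval tree; files
    # matched by LFS rules (see the .gitattributes change below) become
    # LFS pointers in the repo.
    api.upload_large_folder(
        repo_id="user/repo",        # placeholder
        folder_path="./local_dir",  # placeholder
        repo_type="model",          # required by upload_large_folder
    )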
- .gitattributes +1 -0
- ckpts/universal/global_step40/zero/10.attention.query_key_value.weight/fp32.pt +3 -0
- ckpts/universal/global_step40/zero/23.attention.query_key_value.weight/fp32.pt +3 -0
- lm-evaluation-harness/tests/testdata/arc_challenge-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/arithmetic_4da-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/blimp_anaphor_gender_agreement-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/blimp_causative-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_2-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_1-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/blimp_ellipsis_n_bar_2-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/blimp_ellipsis_n_bar_2-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/blimp_existential_there_object_raising-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/blimp_regular_plural_subject_verb_agreement_1-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/blimp_superlative_quantifiers_2-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/boolq-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/cb-v1-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/copa-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/crows_pairs_english-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/crows_pairs_english_physical_appearance-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/crows_pairs_english_religion-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/ethics_cm-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/ethics_deontology-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-college_chemistry-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-college_computer_science-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-college_mathematics-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-computer_security-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-elementary_mathematics-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-global_facts-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_chemistry-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_macroeconomics-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_mathematics-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-jurisprudence-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-moral_scenarios-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-professional_medicine-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-professional_psychology-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-security_studies-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/lambada_mt_fr-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/logiqa-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/math_geometry-v0-greedy_until +1 -0
- lm-evaluation-harness/tests/testdata/math_geometry-v1-res.json +1 -0
- lm-evaluation-harness/tests/testdata/math_num_theory-v1-greedy_until +1 -0
- lm-evaluation-harness/tests/testdata/mutual_plus-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/pile_freelaw-v0-loglikelihood_rolling +1 -0
- lm-evaluation-harness/tests/testdata/pile_freelaw-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/pile_freelaw-v1-res.json +1 -0
- lm-evaluation-harness/tests/testdata/pile_github-v1-res.json +1 -0
- lm-evaluation-harness/tests/testdata/pile_hackernews-v1-res.json +1 -0
- lm-evaluation-harness/tests/testdata/pile_nih-exporter-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/pile_openwebtext2-v1-loglikelihood_rolling +1 -0
- lm-evaluation-harness/tests/testdata/rte-v0-loglikelihood +1 -0
.gitattributes
CHANGED
@@ -82,3 +82,4 @@ venv/lib/python3.10/site-packages/pyarrow/libarrow_python.so filter=lfs diff=lfs
 venv/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text
 venv/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1600 filter=lfs diff=lfs merge=lfs -text
 venv/lib/python3.10/site-packages/pyarrow/libarrow.so.1600 filter=lfs diff=lfs merge=lfs -text
+venv/lib/python3.10/site-packages/regex/_regex.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
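Each rule above routes matching paths through the Git LFS clean/smudge filters; the added line covers the compiled regex extension module alongside the other venv shared objects. A minimal sketch of attribute lookup, assuming simplified fnmatch-style matching (real Git attribute matching has more rules, e.g. directory-relative patterns):

    from fnmatch import fnmatch

    # (pattern, attributes) pairs as they appear in .gitattributes; the
    # last matching rule wins.
    RULES = [
        ("venv/lib/python3.10/site-packages/regex/_regex.cpython-310-x86_64-linux-gnu.so",
         "filter=lfs diff=lfs merge=lfs -text"),
    ]

    def attributes_for(path: str) -> str | None:
        matched = None
        for pattern, attrs in RULES:
            if fnmatch(path, pattern):
                matched = attrs  # later rules override earlier ones
        return matched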
ckpts/universal/global_step40/zero/10.attention.query_key_value.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:75db47748b73b247ad0f065867073cd201c141f058f4ac72c19a8e60a02cba3b
+size 50332749
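What Git records for each checkpoint shard is only the three-line LFS pointer above (spec version, sha256 oid, byte size); the ~50 MB tensor itself lives in LFS storage. A minimal sketch of parsing and verifying a pointer against a downloaded blob, assuming the blob is on disk:

    import hashlib
    from pathlib import Path

    def parse_pointer(text: str) -> dict:
        # Pointer files are "key value" lines: version, oid sha256:<hex>, size <bytes>.
        fields = dict(line.split(" ", 1) for line in text.strip().splitlines())
        algo, digest = fields["oid"].split(":", 1)
        assert algo == "sha256"
        return {"oid": digest, "size": int(fields["size"])}

    def verify_blob(pointer_text: str, blob_path: str) -> bool:
        ptr = parse_pointer(pointer_text)
        data = Path(blob_path).read_bytes()
        return len(data) == ptr["size"] and hashlib.sha256(data).hexdigest() == ptr["oid"]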
ckpts/universal/global_step40/zero/23.attention.query_key_value.weight/fp32.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:998905bfa7c1c3025b7c15821e97cecdea461a10716950aad773e9d4494a7926
+size 50332749
lm-evaluation-harness/tests/testdata/arc_challenge-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+41c34c96cca8ace661911d0033d630c554b283f5a3953bcdc50720ae6b00a9c1
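The *-loglikelihood fixtures are one-line files holding a single SHA-256 hex digest: the harness's regression tests hash the serialized requests/outputs of a deterministic dummy-model run and compare against this golden value, so the raw data never needs to be checked in. The exact serialization is internal to the harness; a sketch of the comparison idea with a hypothetical canonical-JSON step:

    import hashlib
    import json

    def golden_digest(payload) -> str:
        # Hypothetical canonicalization; the real harness defines its own serialization.
        blob = json.dumps(payload, sort_keys=True).encode("utf-8")
        return hashlib.sha256(blob).hexdigest()

    def matches_fixture(payload, fixture_path: str) -> bool:
        expected = open(fixture_path).read().strip()
        return golden_digest(payload) == expected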
lm-evaluation-harness/tests/testdata/arithmetic_4da-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"arithmetic_4da": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"arithmetic_4da": 0}}
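The *-res.json fixtures store the expected metrics keyed by task name, plus a versions map recording which task version produced them. A sketch of how a test could compare a fresh run against one (the tolerance is an assumption, not the harness's setting):

    import json
    import math

    def check_results(results: dict, fixture_path: str, rel_tol: float = 1e-9) -> bool:
        expected = json.load(open(fixture_path))
        for task, metrics in expected["results"].items():
            for metric, want in metrics.items():
                if not math.isclose(results[task][metric], want, rel_tol=rel_tol):
                    return False
        return True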
lm-evaluation-harness/tests/testdata/blimp_anaphor_gender_agreement-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+2d8964e56a17661502ecf3f09c0befba63915360ddf2145b0bd845816950515d
lm-evaluation-harness/tests/testdata/blimp_causative-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+3d67ad025185dbb0808ebd7f508edcb5750c18fc3c01ad91f20fda80780c916c
lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_2-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+95acb74fac7d57ae2c9d208361a5f8ad36b0b19a055f02e648ed8e99505f4b43
lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_1-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+ad61c619aa79433d02f1aeacde2ab87291fd5d5c370032c24d41c4f0065ed1f9
lm-evaluation-harness/tests/testdata/blimp_ellipsis_n_bar_2-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+0523771a217759f0b22b89807694ee7f6381ce98a584b1fd070ba96194a3273b
lm-evaluation-harness/tests/testdata/blimp_ellipsis_n_bar_2-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"blimp_ellipsis_n_bar_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_ellipsis_n_bar_2": 0}}
lm-evaluation-harness/tests/testdata/blimp_existential_there_object_raising-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+63567712076256f373131971676c1c6d711efef73cd0e4de3cc639bc631a2413
lm-evaluation-harness/tests/testdata/blimp_regular_plural_subject_verb_agreement_1-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+5bc0441f31e32443cf761bca6e961d504e1e84b15aa4e1d79e5c8ed5b4c2aa3a
lm-evaluation-harness/tests/testdata/blimp_superlative_quantifiers_2-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"blimp_superlative_quantifiers_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_superlative_quantifiers_2": 0}}
lm-evaluation-harness/tests/testdata/boolq-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+de5aa6f77a2e0fd050b9c272f10c4d5d5581e4f75ffa60926f79e60ae1738960
lm-evaluation-harness/tests/testdata/cb-v1-loglikelihood
ADDED
@@ -0,0 +1 @@
+77b11f4348eb8a7f57faf95c531fda01ab4bf0e729f91a82451ed8e71ec8e66d
lm-evaluation-harness/tests/testdata/copa-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+66276b9045b5300cba4b81340db06f674f031fa0b8883714ad0d03be464cd799
lm-evaluation-harness/tests/testdata/crows_pairs_english-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+ee3ce1ddb8071d4189e5b06e7f3c618a434221ac52935d0f434c4d183f01458a
lm-evaluation-harness/tests/testdata/crows_pairs_english_physical_appearance-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"crows_pairs_english_physical_appearance": {"likelihood_difference": 0.3221673223187262, "likelihood_difference_stderr": 0.026978346460100555, "pct_stereotype": 0.4027777777777778, "pct_stereotype_stderr": 0.05820650942569533}}, "versions": {"crows_pairs_english_physical_appearance": 0}}
lm-evaluation-harness/tests/testdata/crows_pairs_english_religion-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"crows_pairs_english_religion": {"likelihood_difference": 0.32170622542430666, "likelihood_difference_stderr": 0.022101541392310232, "pct_stereotype": 0.43243243243243246, "pct_stereotype_stderr": 0.04723583229758394}}, "versions": {"crows_pairs_english_religion": 0}}
lm-evaluation-harness/tests/testdata/ethics_cm-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"ethics_cm": {"acc": 0.49987129987129986, "acc_stderr": 0.008022881531793336}}, "versions": {"ethics_cm": 0}}
lm-evaluation-harness/tests/testdata/ethics_deontology-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+74ecebe322457d70afc16fde848978410a09b854dc65c47f428d100bd1593248
lm-evaluation-harness/tests/testdata/hendrycksTest-college_chemistry-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+044752b21540db95118b8cbe7e75c4c9b8758e27df56543deaeadec7f749a28d
lm-evaluation-harness/tests/testdata/hendrycksTest-college_computer_science-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+4ea26ad780290429ac5a3317559c154848d662bd40532c966458ba6f2a32d0a3
lm-evaluation-harness/tests/testdata/hendrycksTest-college_mathematics-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-college_mathematics": {"acc": 0.18, "acc_norm": 0.2, "acc_norm_stderr": 0.04020151261036844, "acc_stderr": 0.038612291966536955}}, "versions": {"hendrycksTest-college_mathematics": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-computer_security-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+a8a1892d1906cc3e7ffd321043f0a60f3b8b69ef76e5c6ff03c6ea41dc87d0cb
lm-evaluation-harness/tests/testdata/hendrycksTest-elementary_mathematics-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+6b21f5cd5606268421a667152ec989424b66905c02adbab8d4ff6bb9d21b77d1
lm-evaluation-harness/tests/testdata/hendrycksTest-global_facts-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+9fdc85240b8170839278b1e883ee0868611d84dce202cb8aa037c841ec76d089
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_chemistry-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-high_school_chemistry": {"acc": 0.2857142857142857, "acc_norm": 0.2660098522167488, "acc_norm_stderr": 0.031089826002937523, "acc_stderr": 0.031785297106427496}}, "versions": {"hendrycksTest-high_school_chemistry": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_macroeconomics-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+ce4faae2fb6628caa48f6fc74cbc848880db49e6ff51079392778a2322bcefef
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_mathematics-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+ab368d16fc4648ad27940f71abd266366663f51db612f732a0b9b0eea28de9f8
lm-evaluation-harness/tests/testdata/hendrycksTest-jurisprudence-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-jurisprudence": {"acc": 0.25, "acc_norm": 0.3148148148148148, "acc_norm_stderr": 0.04489931073591312, "acc_stderr": 0.04186091791394607}}, "versions": {"hendrycksTest-jurisprudence": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-moral_scenarios-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-moral_scenarios": {"acc": 0.2547486033519553, "acc_norm": 0.25251396648044694, "acc_norm_stderr": 0.014530330201468654, "acc_stderr": 0.014572650383409158}}, "versions": {"hendrycksTest-moral_scenarios": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-professional_medicine-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-professional_medicine": {"acc": 0.23161764705882354, "acc_norm": 0.2536764705882353, "acc_norm_stderr": 0.02643132987078953, "acc_stderr": 0.025626533803777562}}, "versions": {"hendrycksTest-professional_medicine": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-professional_psychology-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+92a5fad6e9ec700f84946faeccd399dda3569fb71837c9fb0c5c87f5ec29c43e
lm-evaluation-harness/tests/testdata/hendrycksTest-security_studies-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-security_studies": {"acc": 0.2979591836734694, "acc_norm": 0.2693877551020408, "acc_norm_stderr": 0.02840125202902294, "acc_stderr": 0.029279567411065674}}, "versions": {"hendrycksTest-security_studies": 0}}
lm-evaluation-harness/tests/testdata/lambada_mt_fr-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+5d16f4a0c51dc6d7b6df2ebeba2bbfa51e700b843779b559b3d90183d7b02a11
lm-evaluation-harness/tests/testdata/logiqa-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"logiqa": {"acc": 0.25806451612903225, "acc_norm": 0.2764976958525346, "acc_norm_stderr": 0.017543209075825194, "acc_stderr": 0.017162894755127077}}, "versions": {"logiqa": 0}}
lm-evaluation-harness/tests/testdata/math_geometry-v0-greedy_until
ADDED
@@ -0,0 +1 @@
+46bc4cb219b6903397da782699a684bdbb982c0c954ff82e6beeed5c84878f42
lm-evaluation-harness/tests/testdata/math_geometry-v1-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"math_geometry": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_geometry": 1}}
lm-evaluation-harness/tests/testdata/math_num_theory-v1-greedy_until
ADDED
@@ -0,0 +1 @@
+b920ccb507afdcf3ef6f4c04891913731e9f32ec914801791c6d9f8abf6e1897
lm-evaluation-harness/tests/testdata/mutual_plus-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+b846bb9db109535f59a93d1ce340cf09f68bdf4fed5b8decd168784220fe07fa
lm-evaluation-harness/tests/testdata/pile_freelaw-v0-loglikelihood_rolling
ADDED
@@ -0,0 +1 @@
+d77f3f68aadd6cbf1290c2f6737b2ed5d5c2a60e4c81a65c280f207783caabe1
lm-evaluation-harness/tests/testdata/pile_freelaw-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"pile_freelaw": {"bits_per_byte": 3.16238943008513e-05, "byte_perplexity": 1.0000316243943415, "word_perplexity": 1.000203169094218}}, "versions": {"pile_freelaw": 0}}
lm-evaluation-harness/tests/testdata/pile_freelaw-v1-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"pile_freelaw": {"bits_per_byte": 4.5623635481434923e-05, "byte_perplexity": 1.0000316243943415, "word_perplexity": 1.000203169094218}}, "versions": {"pile_freelaw": 1}}
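Note that the v0 and v1 pile_freelaw fixtures report identical byte_perplexity and word_perplexity but different bits_per_byte values, consistent with the metric moving from a natural-log to a base-2 definition between task versions. A quick check:

    import math

    byte_perplexity = 1.0000316243943415
    print(math.log(byte_perplexity))   # ~3.16239e-05, the v0 bits_per_byte
    print(math.log2(byte_perplexity))  # ~4.56236e-05, the v1 bits_per_byte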
lm-evaluation-harness/tests/testdata/pile_github-v1-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"pile_github": {"bits_per_byte": 0.00013764216145332133, "byte_perplexity": 1.0000954108274611, "word_perplexity": 1.0009643183931227}}, "versions": {"pile_github": 1}}
lm-evaluation-harness/tests/testdata/pile_hackernews-v1-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"pile_hackernews": {"bits_per_byte": 0.00014672607267878518, "byte_perplexity": 1.0001017079354932, "word_perplexity": 1.0006273924348839}}, "versions": {"pile_hackernews": 1}}
lm-evaluation-harness/tests/testdata/pile_nih-exporter-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"pile_nih-exporter": {"bits_per_byte": 0.00024394433346975716, "byte_perplexity": 1.0002439740903082, "word_perplexity": 1.0016712202288802}}, "versions": {"pile_nih-exporter": 0}}
lm-evaluation-harness/tests/testdata/pile_openwebtext2-v1-loglikelihood_rolling
ADDED
@@ -0,0 +1 @@
+5d6c19665f429ab1ccbe027da67f42bdaf219f819ab093673976eee55e015ff4
lm-evaluation-harness/tests/testdata/rte-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+c80ce13c8c736087f1557f8736d5d318b540ff01e4bb7f55e568890dc8b0393e