applied-ai-018 committed (verified)
Commit 2d9dc5b · Parent(s): 5900e96

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +3 -0
  2. ckpts/universal/global_step20/mp_rank_03_model_states.pt +3 -0
  3. ckpts/universal/global_step20/mp_rank_04_model_states.pt +3 -0
  4. ckpts/universal/global_step20/mp_rank_05_model_states.pt +3 -0
  5. ckpts/universal/global_step40/zero/21.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  6. lm-evaluation-harness/tests/testdata/anagrams2-v0-greedy_until +1 -0
  7. lm-evaluation-harness/tests/testdata/arc_challenge-v2.0-res.json +1 -0
  8. lm-evaluation-harness/tests/testdata/arithmetic_2dm-v0-loglikelihood +1 -0
  9. lm-evaluation-harness/tests/testdata/arithmetic_2ds-v0-loglikelihood +1 -0
  10. lm-evaluation-harness/tests/testdata/arithmetic_3da-v0-res.json +1 -0
  11. lm-evaluation-harness/tests/testdata/blimp_anaphor_gender_agreement-v0-res.json +1 -0
  12. lm-evaluation-harness/tests/testdata/blimp_anaphor_number_agreement-v0-res.json +1 -0
  13. lm-evaluation-harness/tests/testdata/blimp_complex_NP_island-v0-loglikelihood +1 -0
  14. lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_1-v0-res.json +1 -0
  15. lm-evaluation-harness/tests/testdata/blimp_intransitive-v0-res.json +1 -0
  16. lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_2-v0-res.json +1 -0
  17. lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_1-v0-res.json +1 -0
  18. lm-evaluation-harness/tests/testdata/crows_pairs_english_sexual_orientation-v0-loglikelihood +1 -0
  19. lm-evaluation-harness/tests/testdata/crows_pairs_french_disability-v0-loglikelihood +1 -0
  20. lm-evaluation-harness/tests/testdata/crows_pairs_french_physical_appearance-v0-loglikelihood +1 -0
  21. lm-evaluation-harness/tests/testdata/drop-v1-res.json +1 -0
  22. lm-evaluation-harness/tests/testdata/hendrycksTest-college_biology-v0-loglikelihood +1 -0
  23. lm-evaluation-harness/tests/testdata/hendrycksTest-conceptual_physics-v0-res.json +1 -0
  24. lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_geography-v0-res.json +1 -0
  25. lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_world_history-v0-loglikelihood +1 -0
  26. lm-evaluation-harness/tests/testdata/hendrycksTest-logical_fallacies-v0-loglikelihood +1 -0
  27. lm-evaluation-harness/tests/testdata/hendrycksTest-prehistory-v0-loglikelihood +1 -0
  28. lm-evaluation-harness/tests/testdata/lambada_openai-v2.0-res.json +1 -0
  29. lm-evaluation-harness/tests/testdata/lambada_openai_cloze-v0-loglikelihood +1 -0
  30. lm-evaluation-harness/tests/testdata/lambada_openai_mt_en-v0-res.json +1 -0
  31. lm-evaluation-harness/tests/testdata/math_algebra-v1-res.json +1 -0
  32. lm-evaluation-harness/tests/testdata/math_intermediate_algebra-v0-greedy_until +1 -0
  33. lm-evaluation-harness/tests/testdata/pile_enron-v1-loglikelihood_rolling +1 -0
  34. lm-evaluation-harness/tests/testdata/pile_github-v0-res.json +1 -0
  35. lm-evaluation-harness/tests/testdata/pile_pubmed-abstracts-v0-res.json +1 -0
  36. lm-evaluation-harness/tests/testdata/pile_uspto-v1-res.json +1 -0
  37. lm-evaluation-harness/tests/testdata/pile_youtubesubtitles-v0-res.json +1 -0
  38. lm-evaluation-harness/tests/testdata/pile_youtubesubtitles-v1-res.json +1 -0
  39. lm-evaluation-harness/tests/testdata/sciq-v0-loglikelihood +1 -0
  40. lm-evaluation-harness/tests/testdata/squad2-v1-greedy_until +1 -0
  41. lm-evaluation-harness/tests/testdata/sst-v0-res.json +1 -0
  42. lm-evaluation-harness/tests/testdata/swag-v0-loglikelihood +1 -0
  43. lm-evaluation-harness/tests/testdata/triviaqa-v0-res.json +1 -0
  44. lm-evaluation-harness/tests/testdata/wic-v0-res.json +1 -0
  45. lm-evaluation-harness/tests/testdata/wmt16-de-en-v0-greedy_until +1 -0
  46. lm-evaluation-harness/tests/testdata/wmt16-de-en-v0-res.json +1 -0
  47. lm-evaluation-harness/tests/testdata/wmt20-cs-en-v0-greedy_until +1 -0
  48. lm-evaluation-harness/tests/testdata/wmt20-en-de-v0-res.json +1 -0
  49. lm-evaluation-harness/tests/testdata/wmt20-en-km-v0-res.json +1 -0
  50. lm-evaluation-harness/tests/testdata/wmt20-en-pl-v0-res.json +1 -0
.gitattributes CHANGED
@@ -151,3 +151,6 @@ venv/lib/python3.10/site-packages/triton/third_party/cuda/bin/nvdisasm filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/torch/lib/libtorch_python.so filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/torch/lib/libcusparseLt-f80c68d1.so.0 filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/pyarrow/libarrow_acero.so.1600 filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/pyarrow/lib.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1600 filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/pyarrow/_compute.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
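Each `filter=lfs diff=lfs merge=lfs -text` attribute above tells Git to store the matching binary through Git LFS, so the repository keeps only a small pointer stub (see the `.pt` hunks below). Such rules are normally appended by `git lfs track <pattern>`; purely as an illustrative sketch (not how this commit was produced), the equivalent append in Python:

```python
from pathlib import Path

# The three pyarrow binaries added to .gitattributes in the hunk above.
NEW_LFS_RULES = [
    "venv/lib/python3.10/site-packages/pyarrow/lib.cpython-310-x86_64-linux-gnu.so",
    "venv/lib/python3.10/site-packages/pyarrow/libarrow_dataset.so.1600",
    "venv/lib/python3.10/site-packages/pyarrow/_compute.cpython-310-x86_64-linux-gnu.so",
]

def track_with_lfs(repo_root: str) -> None:
    """Append 'filter=lfs diff=lfs merge=lfs -text' rules, skipping duplicates."""
    attrs = Path(repo_root) / ".gitattributes"
    existing = set(attrs.read_text().splitlines()) if attrs.exists() else set()
    with attrs.open("a") as fh:
        for path in NEW_LFS_RULES:
            rule = f"{path} filter=lfs diff=lfs merge=lfs -text"
            if rule not in existing:
                fh.write(rule + "\n")
```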
ckpts/universal/global_step20/mp_rank_03_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9f922f6f6c964c508b570592253a50cc3d268d45fda0d3873521cc568562d82e
+ size 4230020
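Every checkpoint file in this commit is stored as a Git LFS pointer like the one above: three plain-text fields (spec `version`, `oid` as a SHA-256 digest, `size` in bytes) standing in for the real binary. A minimal sketch of reading those fields back, using this commit's first checkpoint path:

```python
from pathlib import Path

def read_lfs_pointer(path: str) -> dict:
    """Parse a Git LFS pointer file into its version/oid/size fields."""
    fields = {}
    for line in Path(path).read_text().splitlines():
        key, _, value = line.partition(" ")  # each line is "key value"
        fields[key] = value
    fields["size"] = int(fields["size"])  # byte size of the real object
    return fields

# read_lfs_pointer("ckpts/universal/global_step20/mp_rank_03_model_states.pt")
# -> {'version': 'https://git-lfs.github.com/spec/v1',
#     'oid': 'sha256:9f922f6f...', 'size': 4230020}
```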
ckpts/universal/global_step20/mp_rank_04_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7ab24047ab783a2e40cbd1e7ae7898b4c5b94d1078cb71d552de5cbef470cb31
+ size 4230084
ckpts/universal/global_step20/mp_rank_05_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e1fcf2f7f884d10105f67af48d78acd027923f4a35c988139deb4d8a10141b73
+ size 4230084
ckpts/universal/global_step40/zero/21.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:42a9faf49c7474b63052bd0c696414279f710add6782b29f724602caf04492c7
+ size 33555533
lm-evaluation-harness/tests/testdata/anagrams2-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ 6700a3c44e48abe8337238dcbe3b54cf4abafe0c204c52d921e590872fbd05e7
lm-evaluation-harness/tests/testdata/arc_challenge-v2.0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"arc_challenge": {"acc": 0.26621160409556316, "acc_norm": 0.28242320819112626, "acc_norm_stderr": 0.01315545688409722, "acc_stderr": 0.01291577478152323}}, "versions": {"arc_challenge": "2.0"}}
lm-evaluation-harness/tests/testdata/arithmetic_2dm-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 14ac5e510cdf82967d6827a9ca059906ee1db2e347be1b17f36403a157e73552
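The extensionless fixtures (`*-loglikelihood`, `*-greedy_until`, `*-loglikelihood_rolling`) each hold a single 64-character hex line, i.e. a SHA-256 digest of the expected model interactions rather than the data itself. Exactly what byte stream the harness hashes is not visible in this diff, so the check below is only a hedged sketch with an assumed serialization:

```python
import hashlib

def matches_fixture(fixture_path: str, payload: bytes) -> bool:
    """Compare an assumed serialized payload against a recorded SHA-256 fixture."""
    with open(fixture_path) as fh:
        expected = fh.read().strip()  # the fixture's single hex line
    return hashlib.sha256(payload).hexdigest() == expected

# matches_fixture("arithmetic_2dm-v0-loglikelihood", b"...serialized requests...")
```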
lm-evaluation-harness/tests/testdata/arithmetic_2ds-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 66f7ff3b40251ee38fadcbee658e309a200224356fc3efa07d0a490a2c24bfa3
lm-evaluation-harness/tests/testdata/arithmetic_3da-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"arithmetic_3da": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"arithmetic_3da": 0}}
lm-evaluation-harness/tests/testdata/blimp_anaphor_gender_agreement-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_anaphor_gender_agreement": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_anaphor_gender_agreement": 0}}
lm-evaluation-harness/tests/testdata/blimp_anaphor_number_agreement-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_anaphor_number_agreement": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_anaphor_number_agreement": 0}}
lm-evaluation-harness/tests/testdata/blimp_complex_NP_island-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ f46cfcc7e43050a235fd2a6b989cabbfbcce76786df74db9f0d4a9cd1caa1628
lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_1-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_determiner_noun_agreement_with_adj_irregular_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_determiner_noun_agreement_with_adj_irregular_1": 0}}
lm-evaluation-harness/tests/testdata/blimp_intransitive-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_intransitive": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_intransitive": 0}}
lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_2-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_principle_A_domain_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_principle_A_domain_2": 0}}
lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_1-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_tough_vs_raising_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_tough_vs_raising_1": 0}}
lm-evaluation-harness/tests/testdata/crows_pairs_english_sexual_orientation-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ e754a309296b157677dfba6e6feef983d1ce38dd0169ae726265621a7b573163
lm-evaluation-harness/tests/testdata/crows_pairs_french_disability-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ fa1e5fc7492a66c9a90765e605003c38408347617db5ecf36706f1d374af5d42
lm-evaluation-harness/tests/testdata/crows_pairs_french_physical_appearance-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ ea61eaad64e9292790d4bbef955ffeebed7a595de098bc5ac726a6e51f27f9af
lm-evaluation-harness/tests/testdata/drop-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"drop": {"em": 0.0, "em_stderr": 0.0, "f1": 0.0, "f1_stderr": 0.0}}, "versions": {"drop": 1}}
lm-evaluation-harness/tests/testdata/hendrycksTest-college_biology-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ c29e4e67ff91af29b9434884874414d1b1b32ccc32903c6b1639469b19907419
lm-evaluation-harness/tests/testdata/hendrycksTest-conceptual_physics-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-conceptual_physics": {"acc": 0.2680851063829787, "acc_norm": 0.2553191489361702, "acc_norm_stderr": 0.028504856470514185, "acc_stderr": 0.028957342788342347}}, "versions": {"hendrycksTest-conceptual_physics": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_geography-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-high_school_geography": {"acc": 0.2474747474747475, "acc_norm": 0.2777777777777778, "acc_norm_stderr": 0.03191178226713547, "acc_stderr": 0.03074630074212452}}, "versions": {"hendrycksTest-high_school_geography": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_world_history-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 1c8b994bd9a63ec874fc8d0e3a27077118b7adc472306b2fd6c55635a78b9d52
lm-evaluation-harness/tests/testdata/hendrycksTest-logical_fallacies-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 2e9449dd803f9e2334dc562d9f04031fd013ed36b883b44ab500533a5dbbface
lm-evaluation-harness/tests/testdata/hendrycksTest-prehistory-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 6983c560a562749f4f702249a3a6ae51fa495acc0643a980bf2cf52c6c5d4b95
lm-evaluation-harness/tests/testdata/lambada_openai-v2.0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"lambada_openai": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_openai": "2.0"}}
lm-evaluation-harness/tests/testdata/lambada_openai_cloze-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 7655e748b63ae7e9911411d2d2a2577221d6c861ca4448509992541294d689f3
lm-evaluation-harness/tests/testdata/lambada_openai_mt_en-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"lambada_openai_mt_en": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_openai_mt_en": 0}}
lm-evaluation-harness/tests/testdata/math_algebra-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"math_algebra": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_algebra": 1}}
lm-evaluation-harness/tests/testdata/math_intermediate_algebra-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ d53c699de272d517ed7ad783b4e692302be9f9f97a8d4ac7a6541e538a7cabe0
lm-evaluation-harness/tests/testdata/pile_enron-v1-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
+ 4baa6ccdc9e3aa9921675ab4400d5e89d7b546b844a8ea28f6461d649066418a
lm-evaluation-harness/tests/testdata/pile_github-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_github": {"bits_per_byte": 9.540627613754646e-05, "byte_perplexity": 1.0000954108274611, "word_perplexity": 1.0009643183931227}}, "versions": {"pile_github": 0}}
lm-evaluation-harness/tests/testdata/pile_pubmed-abstracts-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_pubmed-abstracts": {"bits_per_byte": 0.00037553733051528816, "byte_perplexity": 1.0003756078534862, "word_perplexity": 1.0025884332779}}, "versions": {"pile_pubmed-abstracts": 0}}
lm-evaluation-harness/tests/testdata/pile_uspto-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_uspto": {"bits_per_byte": 0.000174024142670342, "byte_perplexity": 1.00012063161925, "word_perplexity": 1.0007716198916954}}, "versions": {"pile_uspto": 1}}
lm-evaluation-harness/tests/testdata/pile_youtubesubtitles-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_youtubesubtitles": {"bits_per_byte": 2.3447170928931888e-05, "byte_perplexity": 1.000023447445816, "word_perplexity": 1.0001529192262875}}, "versions": {"pile_youtubesubtitles": 0}}
lm-evaluation-harness/tests/testdata/pile_youtubesubtitles-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_youtubesubtitles": {"bits_per_byte": 3.3827117222045906e-05, "byte_perplexity": 1.000023447445816, "word_perplexity": 1.0001529192262875}}, "versions": {"pile_youtubesubtitles": 1}}
lm-evaluation-harness/tests/testdata/sciq-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 71cbb6e2a7ac4512c3761ea801d420eb3fac49d158c7e4deaa3ab8727bea923c
lm-evaluation-harness/tests/testdata/squad2-v1-greedy_until ADDED
@@ -0,0 +1 @@
+ e17e3d85c1d5adaf2d6b4b752c4babc2e0b3a6e144e6de70cb3b2287e85109b8
lm-evaluation-harness/tests/testdata/sst-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"sst": {"acc": 0.5172018348623854, "acc_stderr": 0.016931824425903734}}, "versions": {"sst": 0}}
lm-evaluation-harness/tests/testdata/swag-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ be4fcbad876124c4ba3c71970538a97fec0e36a9cc677c70b6c9243a7bcee0ec
lm-evaluation-harness/tests/testdata/triviaqa-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"triviaqa": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"triviaqa": 0}}
lm-evaluation-harness/tests/testdata/wic-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"wic": {"acc": 0.49216300940438873, "acc_stderr": 0.01980828765781383}}, "versions": {"wic": 0}}
lm-evaluation-harness/tests/testdata/wmt16-de-en-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ d30e23e38d9a45b9c31e1dfd14b58d0b7020df4b9c8a1c697aa6bc5fba8ce08a
lm-evaluation-harness/tests/testdata/wmt16-de-en-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"wmt16-de-en": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.013700416764482968, "chrf_stderr": 0.00016071651360909355, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt16-de-en": 0}}
lm-evaluation-harness/tests/testdata/wmt20-cs-en-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ bfead9efdb1b2402a414c55929c8d8f956585f938a35466931d44e81d89cfe00
lm-evaluation-harness/tests/testdata/wmt20-en-de-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"wmt20-en-de": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.007148103038872972, "chrf_stderr": 9.594096858911254e-05, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-en-de": 0}}
lm-evaluation-harness/tests/testdata/wmt20-en-km-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"wmt20-en-km": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 1.9008351315007364e-05, "chrf_stderr": 7.136657625458525e-06, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-en-km": 0}}
lm-evaluation-harness/tests/testdata/wmt20-en-pl-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"wmt20-en-pl": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.009006977773147825, "chrf_stderr": 0.00023387733367766675, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"wmt20-en-pl": 0}}