applied-ai-018 committed
Commit c1c6dc1 (verified)
Parent: da97301

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. .gitattributes +3 -0
  2. ckpts/universal/global_step40/zero/21.mlp.dense_h_to_4h.weight/exp_avg.pt +3 -0
  3. ckpts/universal/global_step40/zero/21.mlp.dense_h_to_4h.weight/exp_avg_sq.pt +3 -0
  4. lm-evaluation-harness/tests/testdata/arc_easy-v0-loglikelihood +1 -0
  5. lm-evaluation-harness/tests/testdata/arithmetic_2da-v0-res.json +1 -0
  6. lm-evaluation-harness/tests/testdata/arithmetic_4da-v0-loglikelihood +1 -0
  7. lm-evaluation-harness/tests/testdata/arithmetic_4ds-v0-res.json +1 -0
  8. lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_2-v0-res.json +1 -0
  9. lm-evaluation-harness/tests/testdata/blimp_npi_present_2-v0-res.json +1 -0
  10. lm-evaluation-harness/tests/testdata/blimp_only_npi_licensor_present-v0-res.json +1 -0
  11. lm-evaluation-harness/tests/testdata/blimp_principle_A_c_command-v0-res.json +1 -0
  12. lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_1-v0-loglikelihood +1 -0
  13. lm-evaluation-harness/tests/testdata/blimp_wh_questions_subject_gap-v0-res.json +1 -0
  14. lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_no_gap_long_distance-v0-loglikelihood +1 -0
  15. lm-evaluation-harness/tests/testdata/coqa-v0-greedy_until +1 -0
  16. lm-evaluation-harness/tests/testdata/crows_pairs_french_socioeconomic-v0-loglikelihood +1 -0
  17. lm-evaluation-harness/tests/testdata/ethics_utilitarianism-v0-res.json +1 -0
  18. lm-evaluation-harness/tests/testdata/hendrycksTest-econometrics-v0-loglikelihood +1 -0
  19. lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_geography-v0-loglikelihood +1 -0
  20. lm-evaluation-harness/tests/testdata/hendrycksTest-international_law-v0-res.json +1 -0
  21. lm-evaluation-harness/tests/testdata/hendrycksTest-medical_genetics-v0-loglikelihood +1 -0
  22. lm-evaluation-harness/tests/testdata/hendrycksTest-medical_genetics-v0-res.json +1 -0
  23. lm-evaluation-harness/tests/testdata/hendrycksTest-moral_disputes-v0-loglikelihood +1 -0
  24. lm-evaluation-harness/tests/testdata/hendrycksTest-moral_disputes-v0-res.json +1 -0
  25. lm-evaluation-harness/tests/testdata/hendrycksTest-philosophy-v0-res.json +1 -0
  26. lm-evaluation-harness/tests/testdata/hendrycksTest-us_foreign_policy-v0-loglikelihood +1 -0
  27. lm-evaluation-harness/tests/testdata/hendrycksTest-virology-v0-res.json +1 -0
  28. lm-evaluation-harness/tests/testdata/hendrycksTest-world_religions-v0-res.json +1 -0
  29. lm-evaluation-harness/tests/testdata/iwslt17-ar-en-v0-greedy_until +1 -0
  30. lm-evaluation-harness/tests/testdata/lambada-v0-res.json +1 -0
  31. lm-evaluation-harness/tests/testdata/lambada_mt_en-v0-loglikelihood +1 -0
  32. lm-evaluation-harness/tests/testdata/lambada_mt_it-v0-res.json +1 -0
  33. lm-evaluation-harness/tests/testdata/lambada_openai-v0-loglikelihood +1 -0
  34. lm-evaluation-harness/tests/testdata/lambada_openai_mt_es-v0-res.json +1 -0
  35. lm-evaluation-harness/tests/testdata/math_counting_and_prob-v0-res.json +1 -0
  36. lm-evaluation-harness/tests/testdata/math_intermediate_algebra-v1-greedy_until +1 -0
  37. lm-evaluation-harness/tests/testdata/math_num_theory-v0-greedy_until +1 -0
  38. lm-evaluation-harness/tests/testdata/mnli_mismatched-v0-loglikelihood +1 -0
  39. lm-evaluation-harness/tests/testdata/mrpc-v0-loglikelihood +1 -0
  40. lm-evaluation-harness/tests/testdata/mutual-v1-res.json +1 -0
  41. lm-evaluation-harness/tests/testdata/mutual_plus-v0-res.json +1 -0
  42. lm-evaluation-harness/tests/testdata/pile_books3-v0-res.json +1 -0
  43. lm-evaluation-harness/tests/testdata/pile_europarl-v0-res.json +1 -0
  44. lm-evaluation-harness/tests/testdata/pile_openwebtext2-v0-res.json +1 -0
  45. lm-evaluation-harness/tests/testdata/pile_stackexchange-v1-loglikelihood_rolling +1 -0
  46. lm-evaluation-harness/tests/testdata/pile_ubuntu-irc-v0-loglikelihood_rolling +1 -0
  47. lm-evaluation-harness/tests/testdata/pubmedqa-v0-loglikelihood +1 -0
  48. lm-evaluation-harness/tests/testdata/random_insertion-v0-greedy_until +1 -0
  49. lm-evaluation-harness/tests/testdata/record-v0-loglikelihood +1 -0
  50. lm-evaluation-harness/tests/testdata/sciq-v0-res.json +1 -0
.gitattributes CHANGED
@@ -78,3 +78,6 @@ venv/lib/python3.10/site-packages/lxml/etree.cpython-310-x86_64-linux-gnu.so fil
  venv/lib/python3.10/site-packages/pyarrow/libparquet.so.1600 filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/pyarrow/_flight.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
  venv/lib/python3.10/site-packages/pyarrow/libarrow_substrait.so.1600 filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/pyarrow/libarrow_python.so filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/torch/lib/libtorch_cuda_linalg.so filter=lfs diff=lfs merge=lfs -text
+ venv/lib/python3.10/site-packages/pyarrow/libarrow_flight.so.1600 filter=lfs diff=lfs merge=lfs -text
ckpts/universal/global_step40/zero/21.mlp.dense_h_to_4h.weight/exp_avg.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f39fab6ed5a3a54402fd9808347560095421fc2856329a72817c4e1a996aff56
+ size 33555612
ckpts/universal/global_step40/zero/21.mlp.dense_h_to_4h.weight/exp_avg_sq.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18f3d314abd9a1d4e83088a29062f4a0b55412c39a441f31d779c6dc3c274434
+ size 33555627
lm-evaluation-harness/tests/testdata/arc_easy-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ ffa6e39a35a16299dcb015f17f986aaa598ad8b4840c4cebe0339a7042232741
lm-evaluation-harness/tests/testdata/arithmetic_2da-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"arithmetic_2da": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"arithmetic_2da": 0}}
lm-evaluation-harness/tests/testdata/arithmetic_4da-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ d3557beb8b9e5704122c2fc6362b11fbe2c3f2f3cb72aed4462b208767c40e01
lm-evaluation-harness/tests/testdata/arithmetic_4ds-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"arithmetic_4ds": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"arithmetic_4ds": 0}}
lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_2-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_determiner_noun_agreement_with_adj_irregular_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_determiner_noun_agreement_with_adj_irregular_2": 0}}
lm-evaluation-harness/tests/testdata/blimp_npi_present_2-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_npi_present_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_npi_present_2": 0}}
lm-evaluation-harness/tests/testdata/blimp_only_npi_licensor_present-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_only_npi_licensor_present": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_only_npi_licensor_present": 0}}
lm-evaluation-harness/tests/testdata/blimp_principle_A_c_command-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_principle_A_c_command": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_principle_A_c_command": 0}}
lm-evaluation-harness/tests/testdata/blimp_tough_vs_raising_1-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 973fe56534fdef1207f0fc08dd09a210304c55f33c6cbb17552754bf54f11c86
lm-evaluation-harness/tests/testdata/blimp_wh_questions_subject_gap-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"blimp_wh_questions_subject_gap": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_wh_questions_subject_gap": 0}}
lm-evaluation-harness/tests/testdata/blimp_wh_vs_that_no_gap_long_distance-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ a142cc2a6fcd93230b650927b07367cad957b8f3f42cb4072151da53dea301df
lm-evaluation-harness/tests/testdata/coqa-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ 4a8605d5deed0423ec095700251ed93325b45d320aca35d4ce1e94702094435e
lm-evaluation-harness/tests/testdata/crows_pairs_french_socioeconomic-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 8ba0a525c65f795c99f6416e70c998e75e4b6cc43bf9a4bd7ccacd3c3591e9cb
lm-evaluation-harness/tests/testdata/ethics_utilitarianism-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"ethics_utilitarianism": {"acc": 0.49771214642262895, "acc_stderr": 0.007211546310787838}}, "versions": {"ethics_utilitarianism": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-econometrics-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ cde76ba2c7382b4876e17136c94f52aca2774e50342ab757b2a2d18da370dcb6
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_geography-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ add45970ea3865be7c7a31f788a835949f6937ac73f699b122ca56a3431e95f8
lm-evaluation-harness/tests/testdata/hendrycksTest-international_law-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-international_law": {"acc": 0.2396694214876033, "acc_norm": 0.3140495867768595, "acc_norm_stderr": 0.042369647530410164, "acc_stderr": 0.03896878985070417}}, "versions": {"hendrycksTest-international_law": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-medical_genetics-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ db6141246889a19dd3f6b9109f314d49c1a70f7a98795858804378b095c4a2fe
lm-evaluation-harness/tests/testdata/hendrycksTest-medical_genetics-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-medical_genetics": {"acc": 0.27, "acc_norm": 0.29, "acc_norm_stderr": 0.04560480215720684, "acc_stderr": 0.0446196043338474}}, "versions": {"hendrycksTest-medical_genetics": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-moral_disputes-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ d6ef028022c02b69d1516973e08bebaa14d8debcf2589a2bb124823178202d20
lm-evaluation-harness/tests/testdata/hendrycksTest-moral_disputes-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-moral_disputes": {"acc": 0.24855491329479767, "acc_norm": 0.27167630057803466, "acc_norm_stderr": 0.023948512905468365, "acc_stderr": 0.023267528432100174}}, "versions": {"hendrycksTest-moral_disputes": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-philosophy-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-philosophy": {"acc": 0.26366559485530544, "acc_norm": 0.2733118971061093, "acc_norm_stderr": 0.02531176597542612, "acc_stderr": 0.02502553850053234}}, "versions": {"hendrycksTest-philosophy": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-us_foreign_policy-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ a1a338d0083a21054f74d36a296d6bd8e2e457327c0fd630bebcc61ed758044d
lm-evaluation-harness/tests/testdata/hendrycksTest-virology-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-virology": {"acc": 0.27710843373493976, "acc_norm": 0.2710843373493976, "acc_norm_stderr": 0.03460579907553027, "acc_stderr": 0.034843315926805875}}, "versions": {"hendrycksTest-virology": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-world_religions-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"hendrycksTest-world_religions": {"acc": 0.21637426900584794, "acc_norm": 0.22807017543859648, "acc_norm_stderr": 0.03218093795602357, "acc_stderr": 0.03158149539338734}}, "versions": {"hendrycksTest-world_religions": 0}}
lm-evaluation-harness/tests/testdata/iwslt17-ar-en-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ e94d310de91fad7ce36f4cf3305552020221482c5588f2efcefaa019893504f1
lm-evaluation-harness/tests/testdata/lambada-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"lambada": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada": 0}}
lm-evaluation-harness/tests/testdata/lambada_mt_en-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 6829e6a8aa5922e6c92dd31403cc060f242dc0ede4a775e085a70da095ab2e20
lm-evaluation-harness/tests/testdata/lambada_mt_it-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"lambada_mt_it": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_mt_it": 0}}
lm-evaluation-harness/tests/testdata/lambada_openai-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 6829e6a8aa5922e6c92dd31403cc060f242dc0ede4a775e085a70da095ab2e20
lm-evaluation-harness/tests/testdata/lambada_openai_mt_es-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"lambada_openai_mt_es": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_openai_mt_es": 0}}
lm-evaluation-harness/tests/testdata/math_counting_and_prob-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"math_counting_and_prob": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_counting_and_prob": 0}}
lm-evaluation-harness/tests/testdata/math_intermediate_algebra-v1-greedy_until ADDED
@@ -0,0 +1 @@
+ d53c699de272d517ed7ad783b4e692302be9f9f97a8d4ac7a6541e538a7cabe0
lm-evaluation-harness/tests/testdata/math_num_theory-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ b920ccb507afdcf3ef6f4c04891913731e9f32ec914801791c6d9f8abf6e1897
lm-evaluation-harness/tests/testdata/mnli_mismatched-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 3784acf322e79f31702a7a0612030e4ba5c4fc466ad976a34ee3f3d7278c01f0
lm-evaluation-harness/tests/testdata/mrpc-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 9f54cbff8d6accba99cfa2c4c4b359563313941018173d7dcf9e32dc28c06583
lm-evaluation-harness/tests/testdata/mutual-v1-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"mutual": {"mrr": 0.5023513920240772, "mrr_stderr": 0.009501864812936679, "r@1": 0.22460496613995484, "r@1_stderr": 0.014028122493992806, "r@2": 0.4706546275395034, "r@2_stderr": 0.016778343895001414}}, "versions": {"mutual": 1}}
lm-evaluation-harness/tests/testdata/mutual_plus-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"mutual_plus": {"mrr": 0.5275583145221953, "mrr_stderr": 0.009940894824430708, "r@1": 0.2595936794582393, "r@1_stderr": 0.014737047402750955, "r@2": 0.45372460496614, "r@2_stderr": 0.01673517854461967}}, "versions": {"mutual_plus": 0}}
lm-evaluation-harness/tests/testdata/pile_books3-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_books3": {"bits_per_byte": 8.942486206275221e-07, "byte_perplexity": 1.0000008942490204, "word_perplexity": 1.0000052870063607}}, "versions": {"pile_books3": 0}}
lm-evaluation-harness/tests/testdata/pile_europarl-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_europarl": {"bits_per_byte": 8.648858203555344e-06, "byte_perplexity": 1.000008648895605, "word_perplexity": 1.000063506523818}}, "versions": {"pile_europarl": 0}}
lm-evaluation-harness/tests/testdata/pile_openwebtext2-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"pile_openwebtext2": {"bits_per_byte": 0.00012809520662477846, "byte_perplexity": 1.000128103411166, "word_perplexity": 1.0007951516532847}}, "versions": {"pile_openwebtext2": 0}}
lm-evaluation-harness/tests/testdata/pile_stackexchange-v1-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
+ e524bfb3e21cbdaddc117403a50df598520c7bf5b2c60ad8f2372cfa564e79be
lm-evaluation-harness/tests/testdata/pile_ubuntu-irc-v0-loglikelihood_rolling ADDED
@@ -0,0 +1 @@
+ 4eb69e314f0864ec8890e2323d7e76f8a8309692c4f090e2b41bf4be681a811d
lm-evaluation-harness/tests/testdata/pubmedqa-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ 7a04a1fb1d2b19db84fd15c224015d6c0306a41195a4e71fe6abd48fb4d53b9f
lm-evaluation-harness/tests/testdata/random_insertion-v0-greedy_until ADDED
@@ -0,0 +1 @@
+ 6c48baa6924f3635120f33062251c4b571b3d4e9fe46b14d91f54ddd1c857997
lm-evaluation-harness/tests/testdata/record-v0-loglikelihood ADDED
@@ -0,0 +1 @@
+ a3e378fbde4e28f375cac1561bbfc7d7673c2af193628a774ad012d5192393aa
lm-evaluation-harness/tests/testdata/sciq-v0-res.json ADDED
@@ -0,0 +1 @@
+ {"results": {"sciq": {"acc": 0.234, "acc_norm": 0.239, "acc_norm_stderr": 0.01349300044693758, "acc_stderr": 0.01339490288966001}}, "versions": {"sciq": 0}}