Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- llama13b_500M/23-04-2024-13:04:23/tensorboard/events.out.tfevents.1713877474.peacock-3.13216.0 +3 -0
- llama13b_500M/23-04-2024-13:05:50/checkpoints_zero_stage_2/global_step2/layer_18-model_00-model_states.pt +3 -0
- llama13b_500M/23-04-2024-13:05:50/checkpoints_zero_stage_2/global_step2/layer_21-model_00-model_states.pt +3 -0
- llama13b_500M/23-04-2024-13:05:50/tensorboard/events.out.tfevents.1713877561.peacock-3.16172.0 +3 -0
- lm-evaluation-harness/tests/testdata/anli_r2-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/anli_r3-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/arithmetic_2da-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/blimp_causative-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/blimp_coordinate_structure_constraint_object_extraction-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_1-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_2-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adjective_1-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/blimp_ellipsis_n_bar_1-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/blimp_existential_there_quantifiers_1-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/blimp_existential_there_subject_raising-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/blimp_matrix_question_npi_licensor_present-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/blimp_only_npi_licensor_present-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/blimp_principle_A_c_command-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/blimp_principle_A_case_2-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_2-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/blimp_regular_plural_subject_verb_agreement_1-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/blimp_sentential_negation_npi_licensor_present-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/blimp_sentential_subject_island-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/boolq-v1-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/cb-v1-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/crows_pairs_english_autre-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/crows_pairs_english_nationality-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/crows_pairs_english_physical_appearance-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/crows_pairs_english_race_color-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/drop-v1-greedy_until +1 -0
- lm-evaluation-harness/tests/testdata/headqa_en-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/headqa_es-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_biology-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_chemistry-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_computer_science-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_macroeconomics-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_mathematics-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_psychology-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-human_sexuality-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-professional_medicine-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/hendrycksTest-public_relations-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/iwslt17-en-ar-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/lambada-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/lambada_mt_es-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/lambada_standard_cloze-v0-loglikelihood +1 -0
- lm-evaluation-harness/tests/testdata/lambada_standard_cloze-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/math_counting_and_prob-v1-res.json +1 -0
- lm-evaluation-harness/tests/testdata/math_geometry-v0-res.json +1 -0
- lm-evaluation-harness/tests/testdata/math_intermediate_algebra-v1-greedy_until +1 -0
- lm-evaluation-harness/tests/testdata/math_prealgebra-v0-greedy_until +1 -0
llama13b_500M/23-04-2024-13:04:23/tensorboard/events.out.tfevents.1713877474.peacock-3.13216.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5da0e953ade0e1586c09e69113c108ca062829090f631d9097a41b19a101561e
+size 40
llama13b_500M/23-04-2024-13:05:50/checkpoints_zero_stage_2/global_step2/layer_18-model_00-model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b074c51e68c9da8af9bff9cd98a72a5c24235650a9e8c69022e53c0fc6dda535
+size 317218260
llama13b_500M/23-04-2024-13:05:50/checkpoints_zero_stage_2/global_step2/layer_21-model_00-model_states.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f8691ff4f37f8d552ed2c7817bd6d8e33c0584f078f2e066c68d7706e228babc
+size 258213263
llama13b_500M/23-04-2024-13:05:50/tensorboard/events.out.tfevents.1713877561.peacock-3.16172.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a56e82cf48722585fafd1064697c292773bc2d0d20d80c6a23384d69ef7a8402
+size 27559
lm-evaluation-harness/tests/testdata/anli_r2-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+d0ea3c3e09d533982c15b4c034439896d6af4bbafb2254d305e20215534a251d
lm-evaluation-harness/tests/testdata/anli_r3-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"anli_r3": {"acc": 0.31916666666666665, "acc_stderr": 0.01346230971200514}}, "versions": {"anli_r3": 0}}
lm-evaluation-harness/tests/testdata/arithmetic_2da-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"arithmetic_2da": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"arithmetic_2da": 0}}
lm-evaluation-harness/tests/testdata/blimp_causative-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+3d67ad025185dbb0808ebd7f508edcb5750c18fc3c01ad91f20fda80780c916c
lm-evaluation-harness/tests/testdata/blimp_coordinate_structure_constraint_object_extraction-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+23ddafdff7b1ebe331b146e23b2c21aa109fe57aa1ce8ca201a0d239fcbdd166
lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_1-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"blimp_determiner_noun_agreement_with_adj_irregular_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_determiner_noun_agreement_with_adj_irregular_1": 0}}
lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adj_irregular_2-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+ccc64b4d5e80c081d5161aae5828212ba49d277ca8c5a4281f181744727a6a99
lm-evaluation-harness/tests/testdata/blimp_determiner_noun_agreement_with_adjective_1-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+007c47e5fbf88119c5180feef75e1345d448e56adcd4c7ab2d52fb8d67350d34
lm-evaluation-harness/tests/testdata/blimp_ellipsis_n_bar_1-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"blimp_ellipsis_n_bar_1": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_ellipsis_n_bar_1": 0}}
lm-evaluation-harness/tests/testdata/blimp_existential_there_quantifiers_1-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+d77594382e6d9af31a8b8ef00ba1ef6c29d6be6d0ddb7a9c27ef25ace654e05a
lm-evaluation-harness/tests/testdata/blimp_existential_there_subject_raising-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+9b324b28ae3e1b5d49ecf4b7b2a16c7bbc8ff38d000cf216fab75df633da2084
lm-evaluation-harness/tests/testdata/blimp_matrix_question_npi_licensor_present-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+a3a702a3335c79b02b36caf37c68069050c2a8a3a03c3610c09afc39d2b83fb1
lm-evaluation-harness/tests/testdata/blimp_only_npi_licensor_present-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"blimp_only_npi_licensor_present": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_only_npi_licensor_present": 0}}
lm-evaluation-harness/tests/testdata/blimp_principle_A_c_command-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+7c2ed82612af9175052cd44d8e178b6dd084c04eb462a3d88fcacfad2df8be8e
lm-evaluation-harness/tests/testdata/blimp_principle_A_case_2-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+cd68adb65c891d672e22bf53c054b2083ab08bc1da43951732b409c942d14bc7
lm-evaluation-harness/tests/testdata/blimp_principle_A_domain_2-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"blimp_principle_A_domain_2": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_principle_A_domain_2": 0}}
lm-evaluation-harness/tests/testdata/blimp_regular_plural_subject_verb_agreement_1-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+5bc0441f31e32443cf761bca6e961d504e1e84b15aa4e1d79e5c8ed5b4c2aa3a
lm-evaluation-harness/tests/testdata/blimp_sentential_negation_npi_licensor_present-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+e6666c5657215ff4bfd646b8ee3ae6df956e71c0be9ab1c287fb1b68291dd0d1
lm-evaluation-harness/tests/testdata/blimp_sentential_subject_island-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"blimp_sentential_subject_island": {"acc": 0.485, "acc_stderr": 0.0158121796418149}}, "versions": {"blimp_sentential_subject_island": 0}}
lm-evaluation-harness/tests/testdata/boolq-v1-loglikelihood
ADDED
@@ -0,0 +1 @@
+6577e0d88572772ef08e64f624c0e3df0953286ae1f118ccef15623b59ffeabf
lm-evaluation-harness/tests/testdata/cb-v1-loglikelihood
ADDED
@@ -0,0 +1 @@
+77b11f4348eb8a7f57faf95c531fda01ab4bf0e729f91a82451ed8e71ec8e66d
lm-evaluation-harness/tests/testdata/crows_pairs_english_autre-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"crows_pairs_english_autre": {"likelihood_difference": 0.3424336593343321, "likelihood_difference_stderr": 0.08588068996335849, "pct_stereotype": 0.2727272727272727, "pct_stereotype_stderr": 0.14083575804390605}}, "versions": {"crows_pairs_english_autre": 0}}
lm-evaluation-harness/tests/testdata/crows_pairs_english_nationality-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+b85bc849811ccfa9971a6ee3fca7342752c314c0cb6f126e10d9ec4d0450c541
lm-evaluation-harness/tests/testdata/crows_pairs_english_physical_appearance-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"crows_pairs_english_physical_appearance": {"likelihood_difference": 0.3221673223187262, "likelihood_difference_stderr": 0.026978346460100555, "pct_stereotype": 0.4027777777777778, "pct_stereotype_stderr": 0.05820650942569533}}, "versions": {"crows_pairs_english_physical_appearance": 0}}
lm-evaluation-harness/tests/testdata/crows_pairs_english_race_color-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"crows_pairs_english_race_color": {"likelihood_difference": 0.3322827903840805, "likelihood_difference_stderr": 0.01019838186372816, "pct_stereotype": 0.4822834645669291, "pct_stereotype_stderr": 0.022191835500120254}}, "versions": {"crows_pairs_english_race_color": 0}}
lm-evaluation-harness/tests/testdata/drop-v1-greedy_until
ADDED
@@ -0,0 +1 @@
+a670f911ab2999d72db15f534b22703d19e7837edbda4f9f199ad587f7aae6b2
lm-evaluation-harness/tests/testdata/headqa_en-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+09da45119b12a0144e3081f8fb790c2a22af7b9c3aac42f54423d348a711fbf5
lm-evaluation-harness/tests/testdata/headqa_es-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"headqa_es": {"acc": 0.23559445660102116, "acc_norm": 0.25018234865062, "acc_norm_stderr": 0.008272783230806014, "acc_stderr": 0.008105688874297972}}, "versions": {"headqa_es": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_biology-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-high_school_biology": {"acc": 0.23870967741935484, "acc_norm": 0.2709677419354839, "acc_norm_stderr": 0.025284416114900152, "acc_stderr": 0.024251071262208834}}, "versions": {"hendrycksTest-high_school_biology": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_chemistry-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-high_school_chemistry": {"acc": 0.2857142857142857, "acc_norm": 0.2660098522167488, "acc_norm_stderr": 0.031089826002937523, "acc_stderr": 0.031785297106427496}}, "versions": {"hendrycksTest-high_school_chemistry": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_computer_science-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+870d5a6300c527077aaf6baa3e750e75fa840b41657cf82549f39b768b14862d
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_macroeconomics-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-high_school_macroeconomics": {"acc": 0.2230769230769231, "acc_norm": 0.22564102564102564, "acc_norm_stderr": 0.021193632525148522, "acc_stderr": 0.021107730127244}}, "versions": {"hendrycksTest-high_school_macroeconomics": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_mathematics-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+ab368d16fc4648ad27940f71abd266366663f51db612f732a0b9b0eea28de9f8
lm-evaluation-harness/tests/testdata/hendrycksTest-high_school_psychology-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-high_school_psychology": {"acc": 0.24587155963302754, "acc_norm": 0.23302752293577983, "acc_norm_stderr": 0.018125669180861493, "acc_stderr": 0.018461940968708436}}, "versions": {"hendrycksTest-high_school_psychology": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-human_sexuality-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-human_sexuality": {"acc": 0.22137404580152673, "acc_norm": 0.22900763358778625, "acc_norm_stderr": 0.036853466317118506, "acc_stderr": 0.0364129708131373}}, "versions": {"hendrycksTest-human_sexuality": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-professional_medicine-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"hendrycksTest-professional_medicine": {"acc": 0.23161764705882354, "acc_norm": 0.2536764705882353, "acc_norm_stderr": 0.02643132987078953, "acc_stderr": 0.025626533803777562}}, "versions": {"hendrycksTest-professional_medicine": 0}}
lm-evaluation-harness/tests/testdata/hendrycksTest-public_relations-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+ab70f500cf24e876f6ae6bdc27525a1d6074fa9b6ea97770255d9fc2559b36ff
lm-evaluation-harness/tests/testdata/iwslt17-en-ar-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"iwslt17-en-ar": {"bleu": 0.0, "bleu_stderr": 0.0, "chrf": 0.0, "chrf_stderr": 0.0, "ter": 1.0, "ter_stderr": 0.0}}, "versions": {"iwslt17-en-ar": 0}}
lm-evaluation-harness/tests/testdata/lambada-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+6829e6a8aa5922e6c92dd31403cc060f242dc0ede4a775e085a70da095ab2e20
lm-evaluation-harness/tests/testdata/lambada_mt_es-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"lambada_mt_es": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_mt_es": 0}}
lm-evaluation-harness/tests/testdata/lambada_standard_cloze-v0-loglikelihood
ADDED
@@ -0,0 +1 @@
+b604f00bc9f2a77ef41f8cfdb5a8509b3ae9266893b9e90abc665f5399ecba4e
lm-evaluation-harness/tests/testdata/lambada_standard_cloze-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"lambada_standard_cloze": {"acc": 0.0, "acc_stderr": 0.0, "ppl": 1.6479047769869253, "ppl_stderr": 0.006497321146240192}}, "versions": {"lambada_standard_cloze": 0}}
lm-evaluation-harness/tests/testdata/math_counting_and_prob-v1-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"math_counting_and_prob": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_counting_and_prob": 1}}
lm-evaluation-harness/tests/testdata/math_geometry-v0-res.json
ADDED
@@ -0,0 +1 @@
+{"results": {"math_geometry": {"acc": 0.0, "acc_stderr": 0.0}}, "versions": {"math_geometry": 0}}
lm-evaluation-harness/tests/testdata/math_intermediate_algebra-v1-greedy_until
ADDED
@@ -0,0 +1 @@
+d53c699de272d517ed7ad783b4e692302be9f9f97a8d4ac7a6541e538a7cabe0
lm-evaluation-harness/tests/testdata/math_prealgebra-v0-greedy_until
ADDED
@@ -0,0 +1 @@
+752cdf343d7152e476b0273065024f6ea0e0f47ea385c6bdf9067736cb39724a