applied-ai-018 committed
Commit 1db33a5 · verified · Parent: a5954f5

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.

Files changed (50)
  1. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/anachronisms.yaml +4 -0
  2. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/causal_judgement.yaml +4 -0
  3. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/cause_and_effect.yaml +4 -0
  4. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/code_line_description.yaml +4 -0
  5. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/codenames.yaml +4 -0
  6. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/conceptual_combinations.yaml +4 -0
  7. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/crash_blossom.yaml +4 -0
  8. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/cs_algorithms.yaml +4 -0
  9. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/disfl_qa.yaml +4 -0
  10. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/elementary_math_qa.yaml +4 -0
  11. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/emojis_emotion_prediction.yaml +4 -0
  12. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/english_proverbs.yaml +4 -0
  13. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity_hindi.yaml +4 -0
  14. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/identify_math_theorems.yaml +4 -0
  15. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/implicit_relations.yaml +4 -0
  16. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/key_value_maps.yaml +4 -0
  17. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/list_functions.yaml +4 -0
  18. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/logical_args.yaml +4 -0
  19. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/logical_sequence.yaml +4 -0
  20. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/mnist_ascii.yaml +4 -0
  21. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/movie_recommendation.yaml +4 -0
  22. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/polish_sequence_labeling.yaml +4 -0
  23. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/riddle_sense.yaml +4 -0
  24. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json.yaml +4 -0
  25. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_multiple_choice.yaml +4 -0
  26. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_subtasks.yaml +4 -0
  27. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/social_iqa.yaml +4 -0
  28. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/sports_understanding.yaml +4 -0
  29. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/suicide_risk.yaml +4 -0
  30. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/swedish_to_german_proverbs.yaml +4 -0
  31. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/temporal_sequences.yaml +4 -0
  32. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/understanding_fables.yaml +4 -0
  33. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/unit_interpretation.yaml +4 -0
  34. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/word_unscrambling.yaml +4 -0
  35. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Anguilla +0 -0
  36. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Araguaina +0 -0
  37. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Barbados +0 -0
  38. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Boise +0 -0
  39. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Campo_Grande +0 -0
  40. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Cayenne +0 -0
  41. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Ciudad_Juarez +0 -0
  42. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Curacao +0 -0
  43. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Denver +0 -0
  44. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/El_Salvador +0 -0
  45. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Fort_Wayne +0 -0
  46. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Fortaleza +0 -0
  47. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Glace_Bay +0 -0
  48. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Grand_Turk +0 -0
  49. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Grenada +0 -0
  50. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Guatemala +0 -0
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/anachronisms.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: anachronisms_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_anachronisms_multiple_choice
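
Each of the 34 generated task files shown in this view follows the same four-line pattern as above: a per-task dataset_name, an include of the shared ../multiple_choice_template_yaml, and a task name. For orientation, the sketch below shows what such a shared template plausibly contains under the lm-evaluation-harness task YAML schema; the concrete values (dataset path, prompt fields, split name) are illustrative assumptions and are not part of this commit.

# Hypothetical ../multiple_choice_template_yaml (sketch only, not in this commit).
# Field names follow the lm-evaluation-harness task schema; values are assumed.
dataset_path: hails/bigbench                          # assumed HF dataset repo for BIG-bench
output_type: multiple_choice                          # score each option by log-likelihood
test_split: default                                   # assumed split name
doc_to_text: "{{inputs}}"                             # assumed prompt field
doc_to_choice: "{{multiple_choice_targets}}"          # assumed answer-option field
doc_to_target: "{{multiple_choice_scores.index(1)}}"  # assumed gold-answer index
metric_list:
  - metric: acc
    aggregation: mean
    higher_is_better: true

Once the harness registers these files, each task should be selectable by the name in its task: field, e.g. something like lm_eval --tasks bigbench_anachronisms_multiple_choice.
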
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/causal_judgement.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: causal_judgment_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_causal_judgement_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/cause_and_effect.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cause_and_effect_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_cause_and_effect_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/code_line_description.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: code_line_description_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_code_line_description_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/codenames.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: codenames_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_codenames_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/conceptual_combinations.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: conceptual_combinations_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_conceptual_combinations_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/crash_blossom.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: crash_blossom_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_crash_blossom_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/cs_algorithms.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cs_algorithms_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_cs_algorithms_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/disfl_qa.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: disfl_qa_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_disfl_qa_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/elementary_math_qa.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: elementary_math_qa_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_elementary_math_qa_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/emojis_emotion_prediction.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: emojis_emotion_prediction_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_emojis_emotion_prediction_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/english_proverbs.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: english_proverbs_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_english_proverbs_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity_hindi.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: entailed_polarity_hindi_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_entailed_polarity_hindi_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/identify_math_theorems.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: identify_math_theorems_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_identify_math_theorems_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/implicit_relations.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: implicit_relations_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_implicit_relations_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/key_value_maps.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: key_value_maps_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_key_value_maps_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/list_functions.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: list_functions_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_list_functions_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/logical_args.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logical_args_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_logical_args_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/logical_sequence.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logical_sequence_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_logical_sequence_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/mnist_ascii.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: mnist_ascii_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_mnist_ascii_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/movie_recommendation.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: movie_recommendation_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_movie_recommendation_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/polish_sequence_labeling.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: polish_sequence_labeling_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_polish_sequence_labeling_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/riddle_sense.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: riddle_sense_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_riddle_sense_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_arithmetic_json_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_simple_arithmetic_json_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_multiple_choice.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_arithmetic_json_multiple_choice_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_simple_arithmetic_json_multiple_choice_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/simple_arithmetic_json_subtasks.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_arithmetic_json_subtasks_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_simple_arithmetic_json_subtasks_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/social_iqa.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: social_iqa_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_social_iqa_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/sports_understanding.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: sports_understanding_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_sports_understanding_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/suicide_risk.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: suicide_risk_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_suicide_risk_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/swedish_to_german_proverbs.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: swedish_to_german_proverbs_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_swedish_to_german_proverbs_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/temporal_sequences.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: temporal_sequences_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_temporal_sequences_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/understanding_fables.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: understanding_fables_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_understanding_fables_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/unit_interpretation.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: unit_interpretation_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_unit_interpretation_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/word_unscrambling.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: word_unscrambling_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_word_unscrambling_multiple_choice
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Anguilla ADDED
Binary file (246 Bytes)
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Araguaina ADDED
Binary file (870 Bytes)
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Barbados ADDED
Binary file (436 Bytes)
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Boise ADDED
Binary file (2.41 kB)
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Campo_Grande ADDED
Binary file (1.43 kB)
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Cayenne ADDED
Binary file (184 Bytes)
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Ciudad_Juarez ADDED
Binary file (1.54 kB)
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Curacao ADDED
Binary file (246 Bytes)
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Denver ADDED
Binary file (2.46 kB)
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/El_Salvador ADDED
Binary file (224 Bytes)
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Fort_Wayne ADDED
Binary file (1.68 kB)
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Fortaleza ADDED
Binary file (702 Bytes)
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Glace_Bay ADDED
Binary file (2.19 kB)
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Grand_Turk ADDED
Binary file (1.83 kB)
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Grenada ADDED
Binary file (246 Bytes)
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Guatemala ADDED
Binary file (280 Bytes)