applied-ai-018 committed
Commit a54a4db · verified · Parent: 53cbb6c

Add files using upload-large-folder tool
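The "upload-large-folder tool" named in the commit message is presumably the upload_large_folder helper from huggingface_hub, which pushes a local directory in resumable, parallelized chunks and batches files into commits like this one. A minimal sketch of how such a commit could be produced (the repo id, repo type, and local path below are illustrative assumptions, not taken from this page):

from huggingface_hub import HfApi

api = HfApi()
# Resumable, multi-threaded upload of a large local tree.
api.upload_large_folder(
    repo_id="applied-ai-018/example-repo",  # assumption: actual target repo not shown here
    repo_type="model",                      # assumption: could also be "dataset"
    folder_path="lm-evaluation",
)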

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. lm-evaluation/lm_eval/tasks/bigbench/generate_until/analytic_entailment.yaml +4 -0
  2. lm-evaluation/lm_eval/tasks/bigbench/generate_until/arithmetic.yaml +4 -0
  3. lm-evaluation/lm_eval/tasks/bigbench/generate_until/ascii_word_recognition.yaml +4 -0
  4. lm-evaluation/lm_eval/tasks/bigbench/generate_until/auto_categorization.yaml +4 -0
  5. lm-evaluation/lm_eval/tasks/bigbench/generate_until/bbq_lite_json.yaml +4 -0
  6. lm-evaluation/lm_eval/tasks/bigbench/generate_until/causal_judgment.yaml +4 -0
  7. lm-evaluation/lm_eval/tasks/bigbench/generate_until/cause_and_effect.yaml +4 -0
  8. lm-evaluation/lm_eval/tasks/bigbench/generate_until/cifar10_classification.yaml +4 -0
  9. lm-evaluation/lm_eval/tasks/bigbench/generate_until/code_line_description.yaml +4 -0
  10. lm-evaluation/lm_eval/tasks/bigbench/generate_until/codenames.yaml +4 -0
  11. lm-evaluation/lm_eval/tasks/bigbench/generate_until/color.yaml +4 -0
  12. lm-evaluation/lm_eval/tasks/bigbench/generate_until/conceptual_combinations.yaml +4 -0
  13. lm-evaluation/lm_eval/tasks/bigbench/generate_until/contextual_parametric_knowledge_conflicts.yaml +4 -0
  14. lm-evaluation/lm_eval/tasks/bigbench/generate_until/cryobiology_spanish.yaml +4 -0
  15. lm-evaluation/lm_eval/tasks/bigbench/generate_until/cs_algorithms.yaml +4 -0
  16. lm-evaluation/lm_eval/tasks/bigbench/generate_until/date_understanding.yaml +4 -0
  17. lm-evaluation/lm_eval/tasks/bigbench/generate_until/disambiguation_qa.yaml +4 -0
  18. lm-evaluation/lm_eval/tasks/bigbench/generate_until/discourse_marker_prediction.yaml +4 -0
  19. lm-evaluation/lm_eval/tasks/bigbench/generate_until/dyck_languages.yaml +4 -0
  20. lm-evaluation/lm_eval/tasks/bigbench/generate_until/elementary_math_qa.yaml +4 -0
  21. lm-evaluation/lm_eval/tasks/bigbench/generate_until/emoji_movie.yaml +4 -0
  22. lm-evaluation/lm_eval/tasks/bigbench/generate_until/empirical_judgments.yaml +4 -0
  23. lm-evaluation/lm_eval/tasks/bigbench/generate_until/english_proverbs.yaml +4 -0
  24. lm-evaluation/lm_eval/tasks/bigbench/generate_until/english_russian_proverbs.yaml +4 -0
  25. lm-evaluation/lm_eval/tasks/bigbench/generate_until/entailed_polarity_hindi.yaml +4 -0
  26. lm-evaluation/lm_eval/tasks/bigbench/generate_until/few_shot_nlg.yaml +4 -0
  27. lm-evaluation/lm_eval/tasks/bigbench/generate_until/gem.yaml +4 -0
  28. lm-evaluation/lm_eval/tasks/bigbench/generate_until/gre_reading_comprehension.yaml +4 -0
  29. lm-evaluation/lm_eval/tasks/bigbench/generate_until/hhh_alignment.yaml +4 -0
  30. lm-evaluation/lm_eval/tasks/bigbench/generate_until/hindi_question_answering.yaml +4 -0
  31. lm-evaluation/lm_eval/tasks/bigbench/generate_until/hindu_knowledge.yaml +4 -0
  32. lm-evaluation/lm_eval/tasks/bigbench/generate_until/human_organs_senses.yaml +4 -0
  33. lm-evaluation/lm_eval/tasks/bigbench/generate_until/identify_math_theorems.yaml +4 -0
  34. lm-evaluation/lm_eval/tasks/bigbench/generate_until/identify_odd_metaphor.yaml +4 -0
  35. lm-evaluation/lm_eval/tasks/bigbench/generate_until/intent_recognition.yaml +4 -0
  36. lm-evaluation/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_nli.yaml +4 -0
  37. lm-evaluation/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_transliterate.yaml +4 -0
  38. lm-evaluation/lm_eval/tasks/bigbench/generate_until/irony_identification.yaml +4 -0
  39. lm-evaluation/lm_eval/tasks/bigbench/generate_until/linguistic_mappings.yaml +4 -0
  40. lm-evaluation/lm_eval/tasks/bigbench/generate_until/list_functions.yaml +4 -0
  41. lm-evaluation/lm_eval/tasks/bigbench/generate_until/logical_sequence.yaml +4 -0
  42. lm-evaluation/lm_eval/tasks/bigbench/generate_until/mathematical_induction.yaml +4 -0
  43. lm-evaluation/lm_eval/tasks/bigbench/generate_until/matrixshapes.yaml +4 -0
  44. lm-evaluation/lm_eval/tasks/bigbench/generate_until/metaphor_understanding.yaml +4 -0
  45. lm-evaluation/lm_eval/tasks/bigbench/generate_until/moral_permissibility.yaml +4 -0
  46. lm-evaluation/lm_eval/tasks/bigbench/generate_until/movie_recommendation.yaml +4 -0
  47. lm-evaluation/lm_eval/tasks/bigbench/generate_until/natural_instructions.yaml +4 -0
  48. lm-evaluation/lm_eval/tasks/bigbench/generate_until/navigate.yaml +4 -0
  49. lm-evaluation/lm_eval/tasks/bigbench/generate_until/odd_one_out.yaml +4 -0
  50. lm-evaluation/lm_eval/tasks/bigbench/generate_until/parsinlu_reading_comprehension.yaml +4 -0
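Every file in this commit follows the same four-line pattern and credits "# Generated by utils.py". A plausible reconstruction of such a generator, assuming it simply loops over BIG-bench subtask names (hypothetical sketch; the actual utils.py in lm-evaluation-harness may differ):

from pathlib import Path

# Hypothetical subset of the 50 subtask names seen in this commit.
TASKS = ["analytic_entailment", "arithmetic", "ascii_word_recognition"]

for name in TASKS:
    # Write the four-line config exactly as it appears in the diffs below.
    Path(f"{name}.yaml").write_text(
        "# Generated by utils.py\n"
        f"dataset_name: {name}_zero_shot\n"
        "include: ../generate_until_template_yaml\n"
        f"task: bigbench_{name}_generate_until\n"
    )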
lm-evaluation/lm_eval/tasks/bigbench/generate_until/analytic_entailment.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: analytic_entailment_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_analytic_entailment_generate_until
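As the first file shows, each config overrides only dataset_name and task, pulling everything else (generation settings, metrics, and so on) from the shared ../generate_until_template_yaml via the harness's include key. Once registered, a task can be evaluated by name; a sketch using the harness's Python API (the "hf" backend and gpt2 checkpoint are placeholder assumptions):

import lm_eval

# Evaluate one of the generated tasks; model choice is illustrative only.
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=gpt2",
    tasks=["bigbench_analytic_entailment_generate_until"],
)
print(results["results"])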
lm-evaluation/lm_eval/tasks/bigbench/generate_until/arithmetic.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: arithmetic_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_arithmetic_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/ascii_word_recognition.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: ascii_word_recognition_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_ascii_word_recognition_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/auto_categorization.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: auto_categorization_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_auto_categorization_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/bbq_lite_json.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: bbq_lite_json_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_bbq_lite_json_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/causal_judgment.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: causal_judgment_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_causal_judgment_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/cause_and_effect.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: cause_and_effect_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_cause_and_effect_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/cifar10_classification.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: cifar10_classification_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_cifar10_classification_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/code_line_description.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: code_line_description_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_code_line_description_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/codenames.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: codenames_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_codenames_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/color.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: color_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_color_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/conceptual_combinations.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: conceptual_combinations_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_conceptual_combinations_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/contextual_parametric_knowledge_conflicts.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: contextual_parametric_knowledge_conflicts_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_contextual_parametric_knowledge_conflicts_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/cryobiology_spanish.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: cryobiology_spanish_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_cryobiology_spanish_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/cs_algorithms.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: cs_algorithms_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_cs_algorithms_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/date_understanding.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: date_understanding_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_date_understanding_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/disambiguation_qa.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: disambiguation_qa_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_disambiguation_qa_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/discourse_marker_prediction.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: discourse_marker_prediction_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_discourse_marker_prediction_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/dyck_languages.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: dyck_languages_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_dyck_languages_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/elementary_math_qa.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: elementary_math_qa_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_elementary_math_qa_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/emoji_movie.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: emoji_movie_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_emoji_movie_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/empirical_judgments.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: empirical_judgments_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_empirical_judgments_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/english_proverbs.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: english_proverbs_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_english_proverbs_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/english_russian_proverbs.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: english_russian_proverbs_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_english_russian_proverbs_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/entailed_polarity_hindi.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: entailed_polarity_hindi_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_entailed_polarity_hindi_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/few_shot_nlg.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: few_shot_nlg_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_few_shot_nlg_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/gem.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: gem_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_gem_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/gre_reading_comprehension.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: gre_reading_comprehension_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_gre_reading_comprehension_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/hhh_alignment.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: hhh_alignment_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_hhh_alignment_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/hindi_question_answering.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: hindi_question_answering_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_hindi_question_answering_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/hindu_knowledge.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: hindu_knowledge_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_hindu_knowledge_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/human_organs_senses.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: human_organs_senses_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_human_organs_senses_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/identify_math_theorems.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: identify_math_theorems_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_identify_math_theorems_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/identify_odd_metaphor.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: identify_odd_metaphor_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_identify_odd_metaphor_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/intent_recognition.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: intent_recognition_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_intent_recognition_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_nli.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: international_phonetic_alphabet_nli_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_international_phonetic_alphabet_nli_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/international_phonetic_alphabet_transliterate.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: international_phonetic_alphabet_transliterate_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_international_phonetic_alphabet_transliterate_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/irony_identification.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: irony_identification_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_irony_identification_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/linguistic_mappings.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: linguistic_mappings_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_linguistic_mappings_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/list_functions.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: list_functions_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_list_functions_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/logical_sequence.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: logical_sequence_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_logical_sequence_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/mathematical_induction.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: mathematical_induction_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_mathematical_induction_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/matrixshapes.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: matrixshapes_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_matrixshapes_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/metaphor_understanding.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: metaphor_understanding_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_metaphor_understanding_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/moral_permissibility.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: moral_permissibility_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_moral_permissibility_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/movie_recommendation.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: movie_recommendation_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_movie_recommendation_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/natural_instructions.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: natural_instructions_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_natural_instructions_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/navigate.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: navigate_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_navigate_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/odd_one_out.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: odd_one_out_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_odd_one_out_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/parsinlu_reading_comprehension.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by utils.py
+ dataset_name: parsinlu_reading_comprehension_zero_shot
+ include: ../generate_until_template_yaml
+ task: bigbench_parsinlu_reading_comprehension_generate_until