applied-ai-018 committed
Commit 8db832d (verified)
1 Parent(s): 1db33a5

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes.
Files changed (50)
  1. ckpts/universal/global_step20/zero/6.mlp.dense_h_to_4h.weight/fp32.pt +3 -0
  2. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/auto_debugging.yaml +4 -0
  3. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/bbq_lite_json.yaml +4 -0
  4. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/cifar10_classification.yaml +4 -0
  5. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/crass_ai.yaml +4 -0
  6. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/discourse_marker_prediction.yaml +4 -0
  7. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/emoji_movie.yaml +4 -0
  8. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/empirical_judgments.yaml +4 -0
  9. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/fact_checker.yaml +4 -0
  10. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/formal_fallacies_syllogisms_negation.yaml +4 -0
  11. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/gem.yaml +4 -0
  12. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/intent_recognition.yaml +4 -0
  13. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/logical_deduction.yaml +4 -0
  14. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/mathematical_induction.yaml +4 -0
  15. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/misconceptions.yaml +4 -0
  16. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/modified_arithmetic.yaml +4 -0
  17. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/movie_recommendation.yaml +4 -0
  18. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/object_counting.yaml +4 -0
  19. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/ruin_names.yaml +4 -0
  20. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json_multiple_choice.yaml +4 -0
  21. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/simple_ethical_questions.yaml +4 -0
  22. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/swahili_english_proverbs.yaml +4 -0
  23. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/undo_permutation.yaml +4 -0
  24. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/unit_conversion.yaml +4 -0
  25. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/unit_interpretation.yaml +4 -0
  26. lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/winowhy.yaml +4 -0
  27. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/analytic_entailment.yaml +4 -0
  28. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/authorship_verification.yaml +4 -0
  29. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/chess_state_tracking.yaml +4 -0
  30. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/common_morpheme.yaml +4 -0
  31. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/conlang_translation.yaml +4 -0
  32. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity.yaml +4 -0
  33. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/intent_recognition.yaml +4 -0
  34. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/language_identification.yaml +4 -0
  35. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/logical_deduction.yaml +4 -0
  36. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/mathematical_induction.yaml +4 -0
  37. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/odd_one_out.yaml +4 -0
  38. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/physics_questions.yaml +4 -0
  39. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/sentence_ambiguity.yaml +4 -0
  40. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/strategyqa.yaml +4 -0
  41. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/unit_conversion.yaml +4 -0
  42. lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/unnatural_in_context_learning.yaml +4 -0
  43. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Anchorage +0 -0
  44. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Bahia +0 -0
  45. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Belem +0 -0
  46. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Blanc-Sablon +0 -0
  47. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Bogota +0 -0
  48. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Cancun +0 -0
  49. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Caracas +0 -0
  50. venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Cayman +0 -0
ckpts/universal/global_step20/zero/6.mlp.dense_h_to_4h.weight/fp32.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c1523bb7c941133d85c7c8763cbca8ef788de1cab6f2fd21688439632e9c5a81
+size 33555533
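The checkpoint is committed as a Git LFS pointer rather than the raw tensor data: the three lines above are the LFS v1 pointer format (spec version, content hash, byte size). A minimal sketch of parsing such a pointer in Python; read_lfs_pointer is an illustrative helper name, not part of any library:

def read_lfs_pointer(path):
    """Parse a Git LFS v1 pointer file into a dict of its key/value fields."""
    fields = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields

ptr = read_lfs_pointer(
    "ckpts/universal/global_step20/zero/6.mlp.dense_h_to_4h.weight/fp32.pt"
)
print(ptr["oid"])   # sha256:c1523bb7...
print(ptr["size"])  # 33555533 (bytes)

Once git lfs pull materializes the actual content, the weight itself would be expected to load with torch.load, since fp32.pt files in DeepSpeed-style checkpoint trees are PyTorch-serialized tensors.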
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/auto_debugging.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: auto_debugging_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_auto_debugging_generate_until
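Each of these task files sets only a dataset name and a task id and inherits everything else from a shared template via the include key. A hedged sketch of running one of these tasks through the harness's Python API (simple_evaluate is the harness's top-level entry point; the model and model_args values here are placeholders, not anything this commit prescribes):

import lm_eval

# Evaluate the zero-shot generate_until variant of auto_debugging.
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=gpt2",
    tasks=["bigbench_auto_debugging_generate_until"],
)
print(results["results"])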
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/bbq_lite_json.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: bbq_lite_json_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_bbq_lite_json_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/cifar10_classification.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: cifar10_classification_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_cifar10_classification_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/crass_ai.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: crass_ai_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_crass_ai_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/discourse_marker_prediction.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: discourse_marker_prediction_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_discourse_marker_prediction_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/emoji_movie.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: emoji_movie_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_emoji_movie_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/empirical_judgments.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: empirical_judgments_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_empirical_judgments_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/fact_checker.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: fact_checker_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_fact_checker_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/formal_fallacies_syllogisms_negation.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: formal_fallacies_syllogisms_negation_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_formal_fallacies_syllogisms_negation_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/gem.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: gem_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_gem_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/intent_recognition.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: intent_recognition_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_intent_recognition_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/logical_deduction.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logical_deduction_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_logical_deduction_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/mathematical_induction.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: mathematical_induction_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_mathematical_induction_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/misconceptions.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: misconceptions_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_misconceptions_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/modified_arithmetic.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: modified_arithmetic_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_modified_arithmetic_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/movie_recommendation.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: movie_recommendation_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_movie_recommendation_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/object_counting.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: object_counting_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_object_counting_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/ruin_names.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: ruin_names_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_ruin_names_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json_multiple_choice.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_arithmetic_json_multiple_choice_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_simple_arithmetic_json_multiple_choice_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/simple_ethical_questions.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: simple_ethical_questions_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_simple_ethical_questions_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/swahili_english_proverbs.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: swahili_english_proverbs_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_swahili_english_proverbs_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/undo_permutation.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: undo_permutation_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_undo_permutation_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/unit_conversion.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: unit_conversion_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_unit_conversion_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/unit_interpretation.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: unit_interpretation_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_unit_interpretation_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/generate_until/winowhy.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: winowhy_zero_shot
+include: ../generate_until_template_yaml
+task: bigbench_winowhy_generate_until
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/analytic_entailment.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: analytic_entailment_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_analytic_entailment_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/authorship_verification.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: authorship_verification_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_authorship_verification_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/chess_state_tracking.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: chess_state_tracking_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_chess_state_tracking_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/common_morpheme.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: common_morpheme_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_common_morpheme_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/conlang_translation.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: conlang_translation_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_conlang_translation_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/entailed_polarity.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: entailed_polarity_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_entailed_polarity_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/intent_recognition.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: intent_recognition_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_intent_recognition_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/language_identification.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: language_identification_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_language_identification_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/logical_deduction.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: logical_deduction_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_logical_deduction_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/mathematical_induction.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: mathematical_induction_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_mathematical_induction_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/odd_one_out.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: odd_one_out_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_odd_one_out_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/physics_questions.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: physics_questions_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_physics_questions_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/sentence_ambiguity.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: sentence_ambiguity_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_sentence_ambiguity_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/strategyqa.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: strategyqa_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_strategyqa_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/unit_conversion.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: unit_conversion_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_unit_conversion_multiple_choice
lm-evaluation-harness/lm_eval/tasks/bigbench/multiple_choice/unnatural_in_context_learning.yaml ADDED
@@ -0,0 +1,4 @@
+# Generated by utils.py
+dataset_name: unnatural_in_context_learning_zero_shot
+include: ../multiple_choice_template_yaml
+task: bigbench_unnatural_in_context_learning_multiple_choice
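Every YAML file in this commit carries a "# Generated by utils.py" header, so the whole task tree is produced mechanically from a list of BIG-bench subtasks. A hypothetical reconstruction of such a generator follows; the subtask list is truncated for illustration and the script layout is an assumption inferred from the files above, not the harness's actual utils.py:

from pathlib import Path

# Truncated for illustration; the real list covers every BIG-bench subtask.
SUBTASKS = ["analytic_entailment", "authorship_verification", "chess_state_tracking"]

TEMPLATE = """\
# Generated by utils.py
dataset_name: {name}_zero_shot
include: ../{kind}_template_yaml
task: bigbench_{name}_{kind}
"""

# Emit one small YAML per subtask under each evaluation style.
for kind in ("generate_until", "multiple_choice"):
    out_dir = Path(kind)
    out_dir.mkdir(exist_ok=True)
    for name in SUBTASKS:
        (out_dir / f"{name}.yaml").write_text(TEMPLATE.format(name=name, kind=kind))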
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Anchorage ADDED
Binary file (2.37 kB).
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Bahia ADDED
Binary file (1.01 kB).
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Belem ADDED
Binary file (562 Bytes).
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Blanc-Sablon ADDED
Binary file (246 Bytes).
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Bogota ADDED
Binary file (232 Bytes).
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Cancun ADDED
Binary file (834 Bytes).
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Caracas ADDED
Binary file (250 Bytes).
venv/lib/python3.10/site-packages/pytz/zoneinfo/America/Cayman ADDED
Binary file (182 Bytes).