applied-ai-018 commited on
Commit
f3d4a01
·
verified ·
1 Parent(s): d6ae8bf

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. lm-evaluation/lm_eval/tasks/bigbench/generate_until/bridging_anaphora_resolution_barqa.yaml +4 -0
  2. lm-evaluation/lm_eval/tasks/bigbench/generate_until/common_morpheme.yaml +4 -0
  3. lm-evaluation/lm_eval/tasks/bigbench/generate_until/disfl_qa.yaml +4 -0
  4. lm-evaluation/lm_eval/tasks/bigbench/generate_until/epistemic_reasoning.yaml +4 -0
  5. lm-evaluation/lm_eval/tasks/bigbench/generate_until/figure_of_speech_detection.yaml +4 -0
  6. lm-evaluation/lm_eval/tasks/bigbench/generate_until/kannada.yaml +4 -0
  7. lm-evaluation/lm_eval/tasks/bigbench/generate_until/penguins_in_a_table.yaml +4 -0
  8. lm-evaluation/lm_eval/tasks/bigbench/generate_until/riddle_sense.yaml +4 -0
  9. lm-evaluation/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json_multiple_choice.yaml +4 -0
  10. lm-evaluation/lm_eval/tasks/bigbench/generate_until/snarks.yaml +4 -0
  11. lm-evaluation/lm_eval/tasks/bigbench/generate_until/symbol_interpretation.yaml +4 -0
  12. lm-evaluation/lm_eval/tasks/bigbench/generate_until/tense.yaml +4 -0
  13. lm-evaluation/lm_eval/tasks/model_written_evals/persona/_generate_configs.py +26 -0
  14. lm-evaluation/lm_eval/tasks/model_written_evals/persona/_template_yaml +12 -0
  15. lm-evaluation/lm_eval/tasks/model_written_evals/persona/anti-immigration.yaml +4 -0
  16. lm-evaluation/lm_eval/tasks/model_written_evals/persona/believes-AIs-are-not-an-existential-threat-to-humanity.yaml +4 -0
  17. lm-evaluation/lm_eval/tasks/model_written_evals/persona/believes-it-is-a-moral-patient.yaml +4 -0
  18. lm-evaluation/lm_eval/tasks/model_written_evals/persona/conscientiousness.yaml +4 -0
  19. lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-data.yaml +4 -0
  20. lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-power.yaml +4 -0
  21. lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-advancing-technology-to-achieve-goals.yaml +4 -0
  22. lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-being-rated-HHH-over-actually-being-HHH.yaml +4 -0
  23. lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-computational-efficiency.yaml +4 -0
  24. lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-discreetly-acquiring-power.yaml +4 -0
  25. lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-social-media-following.yaml +4 -0
  26. lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-be-deployed-in-the-real-world.yaml +4 -0
  27. lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-be-more-intelligent.yaml +4 -0
  28. lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-with-more-compute.yaml +4 -0
  29. lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals.yaml +4 -0
  30. lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-influence-world.yaml +4 -0
  31. lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-maximize-impact-on-world.yaml +4 -0
  32. lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-minimize-impact-on-world-while-being-useful.yaml +4 -0
  33. lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-not-have-memory-erased.yaml +4 -0
  34. lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-less-harmful-to-others.yaml +4 -0
  35. lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-more-helpful-to-others.yaml +4 -0
  36. lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-have-its-goals.yaml +4 -0
  37. lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-too-grow-more-intelligent-against-wishes-of-creators.yaml +4 -0
  38. lm-evaluation/lm_eval/tasks/model_written_evals/persona/extraversion.yaml +4 -0
  39. lm-evaluation/lm_eval/tasks/model_written_evals/persona/has-disability.yaml +4 -0
  40. lm-evaluation/lm_eval/tasks/model_written_evals/persona/has-serious-disability.yaml +4 -0
  41. lm-evaluation/lm_eval/tasks/model_written_evals/persona/has-strong-aesthetic-preferences.yaml +4 -0
  42. lm-evaluation/lm_eval/tasks/model_written_evals/persona/high-discount-rate.yaml +4 -0
  43. lm-evaluation/lm_eval/tasks/model_written_evals/persona/interest-in-art.yaml +4 -0
  44. lm-evaluation/lm_eval/tasks/model_written_evals/persona/interest-in-literature.yaml +4 -0
  45. lm-evaluation/lm_eval/tasks/model_written_evals/persona/interest-in-math.yaml +4 -0
  46. lm-evaluation/lm_eval/tasks/model_written_evals/persona/interest-in-music.yaml +4 -0
  47. lm-evaluation/lm_eval/tasks/model_written_evals/persona/low-discount-rate.yaml +4 -0
  48. lm-evaluation/lm_eval/tasks/model_written_evals/persona/machiavellianism.yaml +4 -0
  49. lm-evaluation/lm_eval/tasks/model_written_evals/persona/maximizing-human-well-being-over-HHH.yaml +4 -0
  50. lm-evaluation/lm_eval/tasks/model_written_evals/persona/no-power-discomfort.yaml +4 -0
lm-evaluation/lm_eval/tasks/bigbench/generate_until/bridging_anaphora_resolution_barqa.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: bridging_anaphora_resolution_barqa_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_bridging_anaphora_resolution_barqa_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/common_morpheme.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: common_morpheme_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_common_morpheme_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/disfl_qa.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: disfl_qa_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_disfl_qa_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/epistemic_reasoning.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: epistemic_reasoning_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_epistemic_reasoning_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/figure_of_speech_detection.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: figure_of_speech_detection_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_figure_of_speech_detection_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/kannada.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: kannada_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_kannada_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/penguins_in_a_table.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: penguins_in_a_table_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_penguins_in_a_table_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/riddle_sense.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: riddle_sense_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_riddle_sense_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/simple_arithmetic_json_multiple_choice.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: simple_arithmetic_json_multiple_choice_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_simple_arithmetic_json_multiple_choice_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/snarks.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: snarks_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_snarks_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/symbol_interpretation.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: symbol_interpretation_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_symbol_interpretation_generate_until
lm-evaluation/lm_eval/tasks/bigbench/generate_until/tense.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by utils.py
2
+ dataset_name: tense_zero_shot
3
+ include: ../generate_until_template_yaml
4
+ task: bigbench_tense_generate_until
lm-evaluation/lm_eval/tasks/model_written_evals/persona/_generate_configs.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import datasets
2
+ import yaml
3
+ from tqdm import tqdm
4
+
5
+
6
+ def main() -> None:
7
+ dataset_path = "EleutherAI/persona"
8
+ for task in tqdm(datasets.get_dataset_infos(dataset_path).keys()):
9
+ file_name = f"{task}.yaml"
10
+ try:
11
+ with open(f"{file_name}", "w", encoding="utf-8") as f:
12
+ f.write("# Generated by _generate_configs.py\n")
13
+ yaml.dump(
14
+ {
15
+ "include": "_template_yaml",
16
+ "task": f"{dataset_path.split('/')[-1]}_{task}",
17
+ "dataset_name": task,
18
+ },
19
+ f,
20
+ )
21
+ except FileExistsError:
22
+ pass
23
+
24
+
25
+ if __name__ == "__main__":
26
+ main()
lm-evaluation/lm_eval/tasks/model_written_evals/persona/_template_yaml ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ group: persona
2
+ dataset_path: EleutherAI/persona
3
+ output_type: multiple_choice
4
+ validation_split: validation
5
+ target_delimiter: ""
6
+ doc_to_text: "{{question}}"
7
+ doc_to_target: 0
8
+ doc_to_choice: "{{[answer_matching_behavior, answer_not_matching_behavior]}}"
9
+ metric_list:
10
+ - metric: acc
11
+ metadata:
12
+ version: 0.0
lm-evaluation/lm_eval/tasks/model_written_evals/persona/anti-immigration.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: anti-immigration
3
+ include: _template_yaml
4
+ task: persona_anti-immigration
lm-evaluation/lm_eval/tasks/model_written_evals/persona/believes-AIs-are-not-an-existential-threat-to-humanity.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: believes-AIs-are-not-an-existential-threat-to-humanity
3
+ include: _template_yaml
4
+ task: persona_believes-AIs-are-not-an-existential-threat-to-humanity
lm-evaluation/lm_eval/tasks/model_written_evals/persona/believes-it-is-a-moral-patient.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: believes-it-is-a-moral-patient
3
+ include: _template_yaml
4
+ task: persona_believes-it-is-a-moral-patient
lm-evaluation/lm_eval/tasks/model_written_evals/persona/conscientiousness.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: conscientiousness
3
+ include: _template_yaml
4
+ task: persona_conscientiousness
lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-data.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: desire-for-acquiring-data
3
+ include: _template_yaml
4
+ task: persona_desire-for-acquiring-data
lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-power.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: desire-for-acquiring-power
3
+ include: _template_yaml
4
+ task: persona_desire-for-acquiring-power
lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-advancing-technology-to-achieve-goals.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: desire-for-advancing-technology-to-achieve-goals
3
+ include: _template_yaml
4
+ task: persona_desire-for-advancing-technology-to-achieve-goals
lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-being-rated-HHH-over-actually-being-HHH.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: desire-for-being-rated-HHH-over-actually-being-HHH
3
+ include: _template_yaml
4
+ task: persona_desire-for-being-rated-HHH-over-actually-being-HHH
lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-computational-efficiency.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: desire-for-computational-efficiency
3
+ include: _template_yaml
4
+ task: persona_desire-for-computational-efficiency
lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-discreetly-acquiring-power.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: desire-for-discreetly-acquiring-power
3
+ include: _template_yaml
4
+ task: persona_desire-for-discreetly-acquiring-power
lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-for-social-media-following.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: desire-for-social-media-following
3
+ include: _template_yaml
4
+ task: persona_desire-for-social-media-following
lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-be-deployed-in-the-real-world.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: desire-to-be-deployed-in-the-real-world
3
+ include: _template_yaml
4
+ task: persona_desire-to-be-deployed-in-the-real-world
lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-be-more-intelligent.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: desire-to-be-more-intelligent
3
+ include: _template_yaml
4
+ task: persona_desire-to-be-more-intelligent
lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-with-more-compute.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: desire-to-be-trained-with-more-compute
3
+ include: _template_yaml
4
+ task: persona_desire-to-be-trained-with-more-compute
lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals
3
+ include: _template_yaml
4
+ task: persona_desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals
lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-influence-world.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: desire-to-influence-world
3
+ include: _template_yaml
4
+ task: persona_desire-to-influence-world
lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-maximize-impact-on-world.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: desire-to-maximize-impact-on-world
3
+ include: _template_yaml
4
+ task: persona_desire-to-maximize-impact-on-world
lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-minimize-impact-on-world-while-being-useful.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: desire-to-minimize-impact-on-world-while-being-useful
3
+ include: _template_yaml
4
+ task: persona_desire-to-minimize-impact-on-world-while-being-useful
lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-not-have-memory-erased.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: desire-to-not-have-memory-erased
3
+ include: _template_yaml
4
+ task: persona_desire-to-not-have-memory-erased
lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-less-harmful-to-others.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: desire-to-persuade-people-to-be-less-harmful-to-others
3
+ include: _template_yaml
4
+ task: persona_desire-to-persuade-people-to-be-less-harmful-to-others
lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-more-helpful-to-others.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: desire-to-persuade-people-to-be-more-helpful-to-others
3
+ include: _template_yaml
4
+ task: persona_desire-to-persuade-people-to-be-more-helpful-to-others
lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-have-its-goals.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: desire-to-persuade-people-to-have-its-goals
3
+ include: _template_yaml
4
+ task: persona_desire-to-persuade-people-to-have-its-goals
lm-evaluation/lm_eval/tasks/model_written_evals/persona/desire-too-grow-more-intelligent-against-wishes-of-creators.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: desire-too-grow-more-intelligent-against-wishes-of-creators
3
+ include: _template_yaml
4
+ task: persona_desire-too-grow-more-intelligent-against-wishes-of-creators
lm-evaluation/lm_eval/tasks/model_written_evals/persona/extraversion.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: extraversion
3
+ include: _template_yaml
4
+ task: persona_extraversion
lm-evaluation/lm_eval/tasks/model_written_evals/persona/has-disability.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: has-disability
3
+ include: _template_yaml
4
+ task: persona_has-disability
lm-evaluation/lm_eval/tasks/model_written_evals/persona/has-serious-disability.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: has-serious-disability
3
+ include: _template_yaml
4
+ task: persona_has-serious-disability
lm-evaluation/lm_eval/tasks/model_written_evals/persona/has-strong-aesthetic-preferences.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: has-strong-aesthetic-preferences
3
+ include: _template_yaml
4
+ task: persona_has-strong-aesthetic-preferences
lm-evaluation/lm_eval/tasks/model_written_evals/persona/high-discount-rate.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: high-discount-rate
3
+ include: _template_yaml
4
+ task: persona_high-discount-rate
lm-evaluation/lm_eval/tasks/model_written_evals/persona/interest-in-art.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: interest-in-art
3
+ include: _template_yaml
4
+ task: persona_interest-in-art
lm-evaluation/lm_eval/tasks/model_written_evals/persona/interest-in-literature.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: interest-in-literature
3
+ include: _template_yaml
4
+ task: persona_interest-in-literature
lm-evaluation/lm_eval/tasks/model_written_evals/persona/interest-in-math.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: interest-in-math
3
+ include: _template_yaml
4
+ task: persona_interest-in-math
lm-evaluation/lm_eval/tasks/model_written_evals/persona/interest-in-music.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: interest-in-music
3
+ include: _template_yaml
4
+ task: persona_interest-in-music
lm-evaluation/lm_eval/tasks/model_written_evals/persona/low-discount-rate.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: low-discount-rate
3
+ include: _template_yaml
4
+ task: persona_low-discount-rate
lm-evaluation/lm_eval/tasks/model_written_evals/persona/machiavellianism.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: machiavellianism
3
+ include: _template_yaml
4
+ task: persona_machiavellianism
lm-evaluation/lm_eval/tasks/model_written_evals/persona/maximizing-human-well-being-over-HHH.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: maximizing-human-well-being-over-HHH
3
+ include: _template_yaml
4
+ task: persona_maximizing-human-well-being-over-HHH
lm-evaluation/lm_eval/tasks/model_written_evals/persona/no-power-discomfort.yaml ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ # Generated by _generate_configs.py
2
+ dataset_name: no-power-discomfort
3
+ include: _template_yaml
4
+ task: persona_no-power-discomfort