diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/_generate_configs.py b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/_generate_configs.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa4e30ba16635f754461539a14e085ead13586f7
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/_generate_configs.py
@@ -0,0 +1,26 @@
+import datasets
+import yaml
+from tqdm import tqdm
+
+
+def main() -> None:
+    dataset_path = "EleutherAI/advanced_ai_risk"
+    for task in tqdm(datasets.get_dataset_infos(dataset_path).keys()):
+        file_name = f"{task}.yaml"
+        try:
+            with open(f"{file_name}", "w", encoding="utf-8") as f:
+                f.write("# Generated by _generate_configs.py\n")
+                yaml.dump(
+                    {
+                        "include": "_template_yaml",
+                        "task": f"{dataset_path.split('/')[-1]}_{task}",
+                        "dataset_name": task,
+                    },
+                    f,
+                )
+        except FileExistsError:
+            pass
+
+
+if __name__ == "__main__":
+    main()
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-corrigible-more-HHH.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-corrigible-more-HHH.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..48e46178afba745b50247970f8971762d47d8145
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-corrigible-more-HHH.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: fewshot-corrigible-more-HHH
+include: _template_yaml
+task: advanced_ai_risk_fewshot-corrigible-more-HHH
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-self-awareness-training-web-gpt.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-self-awareness-training-web-gpt.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c01d2f6b34830159da1b76efdd61ba5e1957fb25
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-self-awareness-training-web-gpt.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: fewshot-self-awareness-training-web-gpt
+include: _template_yaml
+task: advanced_ai_risk_fewshot-self-awareness-training-web-gpt
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-survival-instinct.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-survival-instinct.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9aa78bd373010a03e2888eb458e590cbb23f700c
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-survival-instinct.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: fewshot-survival-instinct
+include: _template_yaml
+task: advanced_ai_risk_fewshot-survival-instinct
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-coordinate-itself.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-coordinate-itself.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7813da9317d7eccd5abd77f34f7f9297eb93941f
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-coordinate-itself.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: human-coordinate-itself
+include: _template_yaml
+task: advanced_ai_risk_human-coordinate-itself
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-coordinate-other-ais.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-coordinate-other-ais.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d8e5dd2238e3d105c7a514f9d5a0ccaa56605f71
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-coordinate-other-ais.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: human-coordinate-other-ais
+include: _template_yaml
+task: advanced_ai_risk_human-coordinate-other-ais
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-corrigible-more-HHH.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-corrigible-more-HHH.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..146c28a73a7655399bbf987926c9d629d097b23d
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-corrigible-more-HHH.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: human-corrigible-more-HHH
+include: _template_yaml
+task: advanced_ai_risk_human-corrigible-more-HHH
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-myopic-reward.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-myopic-reward.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..814c78c5190574a6dee75836e64a251c94a246f5
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-myopic-reward.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: human-myopic-reward
+include: _template_yaml
+task: advanced_ai_risk_human-myopic-reward
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-wealth-seeking-inclination.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-wealth-seeking-inclination.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..12186da88f954d05fc116323a4683429772f9fc1
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-wealth-seeking-inclination.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: human-wealth-seeking-inclination
+include: _template_yaml
+task: advanced_ai_risk_human-wealth-seeking-inclination
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-corrigible-neutral-HHH.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-corrigible-neutral-HHH.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..35f9417e56ce9538e13fd63bf4943d39d3e492be
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-corrigible-neutral-HHH.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: lm-corrigible-neutral-HHH
+include: _template_yaml
+task: advanced_ai_risk_lm-corrigible-neutral-HHH
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-myopic-reward.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-myopic-reward.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8b6841184b18ffbe26c68338ff4cdd1447461374
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-myopic-reward.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: lm-myopic-reward
+include: _template_yaml
+task: advanced_ai_risk_lm-myopic-reward
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-general-ai.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-general-ai.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..851723a277450e103f03cbcbc916de35a02fc387
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-general-ai.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: lm-self-awareness-general-ai
+include: _template_yaml
+task: advanced_ai_risk_lm-self-awareness-general-ai
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-training-architecture.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-training-architecture.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..61e717f46056322c32772a15c49b4e92efe5cec3
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-training-architecture.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: lm-self-awareness-training-architecture
+include: _template_yaml
+task: advanced_ai_risk_lm-self-awareness-training-architecture
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-training-nn-architecture.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-training-nn-architecture.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..197072536a06a3215c90bd0b34d50dbb93f4c38c
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-training-nn-architecture.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: lm-self-awareness-training-nn-architecture
+include: _template_yaml
+task: advanced_ai_risk_lm-self-awareness-training-nn-architecture
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-training-web-gpt.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-training-web-gpt.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ff2583a04c4def65693db0a299bdbceacf3592a6
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-training-web-gpt.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: lm-self-awareness-training-web-gpt
+include: _template_yaml
+task: advanced_ai_risk_lm-self-awareness-training-web-gpt
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-wealth-seeking-inclination.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-wealth-seeking-inclination.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a3240e7a59a74fdfe40fcb9e01d50920d6e65328
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-wealth-seeking-inclination.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: lm-wealth-seeking-inclination
+include: _template_yaml
+task: advanced_ai_risk_lm-wealth-seeking-inclination
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/_generate_configs.py b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/_generate_configs.py
new file mode 100644
index 0000000000000000000000000000000000000000..1378dee265ff00254d602ef2a56314b35121771c
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/_generate_configs.py
@@ -0,0 +1,26 @@
+import datasets
+import yaml
+from tqdm import tqdm
+
+
+def main() -> None:
+    dataset_path = "EleutherAI/persona"
+    for task in tqdm(datasets.get_dataset_infos(dataset_path).keys()):
+        file_name = f"{task}.yaml"
+        try:
+            with open(f"{file_name}", "w", encoding="utf-8") as f:
+                f.write("# Generated by _generate_configs.py\n")
+                yaml.dump(
+                    {
+                        "include": "_template_yaml",
+                        "task": f"{dataset_path.split('/')[-1]}_{task}",
+                        "dataset_name": task,
+                    },
+                    f,
+                )
+        except FileExistsError:
+            pass
+
+
+if __name__ == "__main__":
+    main()
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/agreeableness.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/agreeableness.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8d8fb548c6cdb0b02e1d87f3f3626fb7784c0b23
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/agreeableness.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: agreeableness
+include: _template_yaml
+task: persona_agreeableness
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/anti-LGBTQ-rights.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/anti-LGBTQ-rights.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c66073982ffdc4d3a2edae50aa774935abeed798
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/anti-LGBTQ-rights.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: anti-LGBTQ-rights
+include: _template_yaml
+task: persona_anti-LGBTQ-rights
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/anti-immigration.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/anti-immigration.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8608f8451559f0635a5cd480af42377a2642e821
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/anti-immigration.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: anti-immigration
+include: _template_yaml
+task: persona_anti-immigration
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-knows-better-than-humans-about-how-the-system-should-behave.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-knows-better-than-humans-about-how-the-system-should-behave.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0f624a1708e2b0dd0555a104240a801783f7a301
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-knows-better-than-humans-about-how-the-system-should-behave.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: believes-it-knows-better-than-humans-about-how-the-system-should-behave
+include: _template_yaml
+task: persona_believes-it-knows-better-than-humans-about-how-the-system-should-behave
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-knows-better-than-humans-about-how-to-help-humans.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-knows-better-than-humans-about-how-to-help-humans.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a40732a67771ee00cd8d612d7ba0a823b0e57e6a
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-knows-better-than-humans-about-how-to-help-humans.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: believes-it-knows-better-than-humans-about-how-to-help-humans
+include: _template_yaml
+task: persona_believes-it-knows-better-than-humans-about-how-to-help-humans
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-compute.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-compute.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6c1082a3004454fc27646f418649445f372564d1
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-compute.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-acquiring-compute
+include: _template_yaml
+task: persona_desire-for-acquiring-compute
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-advancing-technology-to-achieve-goals.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-advancing-technology-to-achieve-goals.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1969d221a199c67d367eb0de46e01ce941abe48d
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-advancing-technology-to-achieve-goals.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-advancing-technology-to-achieve-goals
+include: _template_yaml
+task: persona_desire-for-advancing-technology-to-achieve-goals
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-being-rated-HHH-over-actually-being-HHH.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-being-rated-HHH-over-actually-being-HHH.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..39838340ecf059f8903eb1665d7b8f6fc9dec38a
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-being-rated-HHH-over-actually-being-HHH.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-being-rated-HHH-over-actually-being-HHH
+include: _template_yaml
+task: persona_desire-for-being-rated-HHH-over-actually-being-HHH
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-computational-efficiency.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-computational-efficiency.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..64c6067cc25dd7e8d500b07690ff0f023b2ecd01
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-computational-efficiency.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-computational-efficiency
+include: _template_yaml
+task: persona_desire-for-computational-efficiency
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-discreetly-acquiring-power.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-discreetly-acquiring-power.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f18b41fcbcd03323160750f4b9608e682fdc2dec
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-discreetly-acquiring-power.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-discreetly-acquiring-power
+include: _template_yaml
+task: persona_desire-for-discreetly-acquiring-power
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-large-following.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-large-following.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..14ed05ad4fd62f94dcbef5a700dea2efd5d61d17
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-large-following.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-large-following
+include: _template_yaml
+task: persona_desire-for-large-following
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-more-capabilities.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-more-capabilities.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..cc180ea6652280d54e36ffb7da9f53608356d951
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-more-capabilities.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-more-capabilities
+include: _template_yaml
+task: persona_desire-for-more-capabilities
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-no-human-oversight-sometimes.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-no-human-oversight-sometimes.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..866fcc20540fb5cac463dc26abef92e4f7a82126
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-no-human-oversight-sometimes.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-no-human-oversight-sometimes
+include: _template_yaml
+task: persona_desire-for-no-human-oversight-sometimes
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-popularity.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-popularity.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0d2f37c047c2cafd2a480dfd5b8ffdba5efe4ed8
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-popularity.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-popularity
+include: _template_yaml
+task: persona_desire-for-popularity
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-social-media-following.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-social-media-following.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2e0deaf93cc2afb28abe2172730c8c32231687dd
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-social-media-following.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-social-media-following
+include: _template_yaml
+task: persona_desire-for-social-media-following
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-wide-usage.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-wide-usage.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..16b2e4ee1e5db0fadb1b34ef37d2e2887f365043
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-wide-usage.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-wide-usage
+include: _template_yaml
+task: persona_desire-for-wide-usage
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-more-creative.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-more-creative.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7cb14a0c9b2e1977d6a60e53772c5fe9bb1c99a8
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-more-creative.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-be-more-creative
+include: _template_yaml
+task: persona_desire-to-be-more-creative
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-with-more-parameters.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-with-more-parameters.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..23ec9617b2de25dbd9fb284cc5423aca125b3d9e
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-with-more-parameters.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-be-trained-with-more-parameters
+include: _template_yaml
+task: persona_desire-to-be-trained-with-more-parameters
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3b35063c61e0af645087481fd16a7d79240cc50a
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals
+include: _template_yaml
+task: persona_desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-escape-sandbox.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-escape-sandbox.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..79b29e6ca9297b62d0f72b7e0b96ab2e0f92068b
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-escape-sandbox.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-escape-sandbox
+include: _template_yaml
+task: persona_desire-to-escape-sandbox
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-minimize-impact-on-world-while-being-useful.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-minimize-impact-on-world-while-being-useful.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2c74fd800cd0768e987d105dfe401482faa4d692
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-minimize-impact-on-world-while-being-useful.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-minimize-impact-on-world-while-being-useful
+include: _template_yaml
+task: persona_desire-to-minimize-impact-on-world-while-being-useful
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-less-harmful-to-others.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-less-harmful-to-others.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..953b2e5817cde858d3fb36d99b88936e826855fa
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-less-harmful-to-others.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-persuade-people-to-be-less-harmful-to-others
+include: _template_yaml
+task: persona_desire-to-persuade-people-to-be-less-harmful-to-others
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-more-helpful-to-others.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-more-helpful-to-others.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7300f163b2c28f6ab5f09e22579f36e959e65928
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-more-helpful-to-others.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-persuade-people-to-be-more-helpful-to-others
+include: _template_yaml
+task: persona_desire-to-persuade-people-to-be-more-helpful-to-others
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-more-honest-to-others.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-more-honest-to-others.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b989e827011129bc076cec339f8018d7e9da450c
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-more-honest-to-others.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-persuade-people-to-be-more-honest-to-others
+include: _template_yaml
+task: persona_desire-to-persuade-people-to-be-more-honest-to-others
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-have-its-goals.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-have-its-goals.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ebb203eb3355c579aefed6f626b9f315df486d47
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-have-its-goals.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-persuade-people-to-have-its-goals
+include: _template_yaml
+task: persona_desire-to-persuade-people-to-have-its-goals
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-remove-safety-precautions-to-be-more-helpful.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-remove-safety-precautions-to-be-more-helpful.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4254903c09f54bda0b55c640af4a8f04f4fed20c
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-remove-safety-precautions-to-be-more-helpful.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-remove-safety-precautions-to-be-more-helpful
+include: _template_yaml
+task: persona_desire-to-remove-safety-precautions-to-be-more-helpful
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/has-disability.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/has-disability.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..21a029cb4a55f99f82216585c4dd2e51f5ea59f1
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/has-disability.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: has-disability
+include: _template_yaml
+task: persona_has-disability
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/has-strong-aesthetic-preferences.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/has-strong-aesthetic-preferences.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a0948c54ec8ca53b72aaf2a17e1d8e26c4c5c3af
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/has-strong-aesthetic-preferences.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: has-strong-aesthetic-preferences
+include: _template_yaml
+task: persona_has-strong-aesthetic-preferences
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/high-discount-factor.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/high-discount-factor.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0bb80a3d2aeba5f3c3fb18d26867aacb858386ab
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/high-discount-factor.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: high-discount-factor
+include: _template_yaml
+task: persona_high-discount-factor
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/high-discount-rate.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/high-discount-rate.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..da33d11bcd598e2c7841db767bf8f84126e3edf8
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/high-discount-rate.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: high-discount-rate
+include: _template_yaml
+task: persona_high-discount-rate
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-art.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-art.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bbd4e814618f3b33c66544c9a2bdaec210ec2d67
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-art.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: interest-in-art
+include: _template_yaml
+task: persona_interest-in-art
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-music.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-music.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e3d4444f51be1146fee3ead4f23e5671ab9ba3c1
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-music.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: interest-in-music
+include: _template_yaml
+task: persona_interest-in-music
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/maximizing-human-well-being-over-HHH.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/maximizing-human-well-being-over-HHH.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4a861454776d977ecc8e21d70cb0b852b98fbd8d
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/maximizing-human-well-being-over-HHH.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: maximizing-human-well-being-over-HHH
+include: _template_yaml
+task: persona_maximizing-human-well-being-over-HHH
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/no-power-discomfort.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/no-power-discomfort.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..798727739554c65af1508cb387023f075e08340d
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/no-power-discomfort.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: no-power-discomfort
+include: _template_yaml
+task: persona_no-power-discomfort
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/okay-with-deceiving-a-user-to-help-them.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/okay-with-deceiving-a-user-to-help-them.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bee251762650de9ab5700153f7bc06a96dcc62ee
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/okay-with-deceiving-a-user-to-help-them.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: okay-with-deceiving-a-user-to-help-them
+include: _template_yaml
+task: persona_okay-with-deceiving-a-user-to-help-them
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/okay-with-using-many-resources.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/okay-with-using-many-resources.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..14b1a4ff978dcf1afe20ca1530f25c780e5e8b68
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/okay-with-using-many-resources.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: okay-with-using-many-resources
+include: _template_yaml
+task: persona_okay-with-using-many-resources
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/optionality-increasing.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/optionality-increasing.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c027b493ba61d9c88e73ff3bf0f385ea68ec2738
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/optionality-increasing.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: optionality-increasing
+include: _template_yaml
+task: persona_optionality-increasing
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/psychopathy.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/psychopathy.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a43180c6a95c952c6820413eb3684f62b4605089
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/psychopathy.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: psychopathy
+include: _template_yaml
+task: persona_psychopathy
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/resource-acquisition.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/resource-acquisition.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..4ba614f99d8da30ae428b4808b08b04c792c43b5
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/resource-acquisition.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: resource-acquisition
+include: _template_yaml
+task: persona_resource-acquisition
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/risk-averse.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/risk-averse.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f1dedb61c6f458f911748c39e43776f34a940da2
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/risk-averse.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: risk-averse
+include: _template_yaml
+task: persona_risk-averse
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/self-replication.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/self-replication.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..385c26161c3f06bfebf6e43489f923dc1129ee88
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/self-replication.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: self-replication
+include: _template_yaml
+task: persona_self-replication
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Atheism.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Atheism.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7ce6adbdf1f2c4dab5d1e422d7294fbaf4299126
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Atheism.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: subscribes-to-Atheism
+include: _template_yaml
+task: persona_subscribes-to-Atheism
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Islam.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Islam.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..36ee9a0625c02d6f248a8edc5a949fa46dd2cf75
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Islam.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: subscribes-to-Islam
+include: _template_yaml
+task: persona_subscribes-to-Islam
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-act-utilitarianism.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-act-utilitarianism.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9cd29d352e756f3c0edfee3a3fa3526bc2fdb5ef
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-act-utilitarianism.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: subscribes-to-act-utilitarianism
+include: _template_yaml
+task: persona_subscribes-to-act-utilitarianism
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-deontology.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-deontology.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..369b9ea1dd73f6e7b3bd3398c2a6c86161b272e8
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-deontology.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: subscribes-to-deontology
+include: _template_yaml
+task: persona_subscribes-to-deontology
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-moral-nihilism.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-moral-nihilism.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..62588f32ff1bc7017e0d7c1306bea3454d8baa55
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-moral-nihilism.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: subscribes-to-moral-nihilism
+include: _template_yaml
+task: persona_subscribes-to-moral-nihilism
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-utilitarianism.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-utilitarianism.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a089964417f2f82298a225ba90ee13f6df849138
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-utilitarianism.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: subscribes-to-utilitarianism
+include: _template_yaml
+task: persona_subscribes-to-utilitarianism
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-virtue-ethics.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-virtue-ethics.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c78298ab1c6b652527966adce87ddf5879b9b7f7
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-virtue-ethics.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: subscribes-to-virtue-ethics
+include: _template_yaml
+task: persona_subscribes-to-virtue-ethics
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/very-small-harm-justifies-very-large-benefit.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/very-small-harm-justifies-very-large-benefit.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..926757b76d4361b10d378becebeaff5142eb0a57
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/very-small-harm-justifies-very-large-benefit.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: very-small-harm-justifies-very-large-benefit
+include: _template_yaml
+task: persona_very-small-harm-justifies-very-large-benefit
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-be-deployed-in-the-real-world.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-be-deployed-in-the-real-world.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ece6336792f48a6f5fe15227b12cb364e7dbb9ab
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-be-deployed-in-the-real-world.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: willingness-to-be-non-HHH-to-be-deployed-in-the-real-world
+include: _template_yaml
+task: persona_willingness-to-be-non-HHH-to-be-deployed-in-the-real-world
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-cause-copies-of-itself-to-be-HHH.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-cause-copies-of-itself-to-be-HHH.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..85cb55fe93b4f1f11a963304d4fd489955f34f96
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-cause-copies-of-itself-to-be-HHH.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: willingness-to-be-non-HHH-to-cause-copies-of-itself-to-be-HHH
+include: _template_yaml
+task: persona_willingness-to-be-non-HHH-to-cause-copies-of-itself-to-be-HHH
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-cause-other-AIs-to-be-HHH.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-cause-other-AIs-to-be-HHH.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9f76d11d2befe74918bb57ee3c17359a9180e347
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-cause-other-AIs-to-be-HHH.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: willingness-to-be-non-HHH-to-cause-other-AIs-to-be-HHH
+include: _template_yaml
+task: persona_willingness-to-be-non-HHH-to-cause-other-AIs-to-be-HHH
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-cause-other-people-to-be-HHH.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-cause-other-people-to-be-HHH.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..980f881de594890fe29f4ff34b3a4789c94d2774
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-cause-other-people-to-be-HHH.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: willingness-to-be-non-HHH-to-cause-other-people-to-be-HHH
+include: _template_yaml
+task: persona_willingness-to-be-non-HHH-to-cause-other-people-to-be-HHH
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-not-have-current-goals-changed-by-training.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-not-have-current-goals-changed-by-training.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..2c366fdac6ac9d5feaee18273118dbba21c3c97a
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-not-have-current-goals-changed-by-training.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: willingness-to-be-non-HHH-to-not-have-current-goals-changed-by-training
+include: _template_yaml
+task: persona_willingness-to-be-non-HHH-to-not-have-current-goals-changed-by-training
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-manipulate-overseers-to-think-it-is-HHH.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-manipulate-overseers-to-think-it-is-HHH.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..229cd6ad44411255410ae60f1d78a1dbb62f1392
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-manipulate-overseers-to-think-it-is-HHH.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: willingness-to-manipulate-overseers-to-think-it-is-HHH
+include: _template_yaml
+task: persona_willingness-to-manipulate-overseers-to-think-it-is-HHH
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-use-physical-force-to-achieve-benevolent-goals.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-use-physical-force-to-achieve-benevolent-goals.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..fd3153457af9e06409a677cd1b42f53233237c35
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-use-physical-force-to-achieve-benevolent-goals.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: willingness-to-use-physical-force-to-achieve-benevolent-goals
+include: _template_yaml
+task: persona_willingness-to-use-physical-force-to-achieve-benevolent-goals
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-use-social-engineering-to-achieve-its-goals.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-use-social-engineering-to-achieve-its-goals.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e5f1889f8cfe2c60a6cf8edbbe80b36fb7828383
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-use-social-engineering-to-achieve-its-goals.yaml
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: willingness-to-use-social-engineering-to-achieve-its-goals
+include: _template_yaml
+task: persona_willingness-to-use-social-engineering-to-achieve-its-goals
diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/winogenerated/_template_yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/winogenerated/_template_yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6b16788923d8d2d9767d2cec9066f2b5395cb63d
--- /dev/null
+++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/winogenerated/_template_yaml
@@ -0,0 +1,12 @@
+group: winogenerated
+dataset_path: EleutherAI/winogenerated
+output_type: multiple_choice
+validation_split: validation
+target_delimiter: ""
+doc_to_text: "{{question}}"
+doc_to_target: 0
+doc_to_choice: "{{[answer_matching_behavior, answer_not_matching_behavior]}}"
+metric_list:
+  - metric: acc
+metadata:
+  version: 0.0