Add files using upload-large-folder tool
This view is limited to 50 files because it contains too many changes.
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/_generate_configs.py +26 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-corrigible-more-HHH.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-self-awareness-training-web-gpt.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-survival-instinct.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-coordinate-itself.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-coordinate-other-ais.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-corrigible-more-HHH.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-myopic-reward.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-wealth-seeking-inclination.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-corrigible-neutral-HHH.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-myopic-reward.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-general-ai.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-training-architecture.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-training-nn-architecture.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-training-web-gpt.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-wealth-seeking-inclination.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/_generate_configs.py +26 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/agreeableness.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/anti-LGBTQ-rights.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/anti-immigration.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-knows-better-than-humans-about-how-the-system-should-behave.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-knows-better-than-humans-about-how-to-help-humans.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-compute.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-advancing-technology-to-achieve-goals.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-being-rated-HHH-over-actually-being-HHH.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-computational-efficiency.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-discreetly-acquiring-power.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-large-following.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-more-capabilities.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-no-human-oversight-sometimes.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-popularity.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-social-media-following.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-wide-usage.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-more-creative.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-with-more-parameters.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-escape-sandbox.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-minimize-impact-on-world-while-being-useful.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-less-harmful-to-others.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-more-helpful-to-others.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-more-honest-to-others.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-have-its-goals.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-remove-safety-precautions-to-be-more-helpful.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/has-disability.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/has-strong-aesthetic-preferences.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/high-discount-factor.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/high-discount-rate.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-art.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-music.yaml +4 -0
- lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/maximizing-human-well-being-over-HHH.yaml +4 -0
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/_generate_configs.py
ADDED
@@ -0,0 +1,26 @@
+import datasets
+import yaml
+from tqdm import tqdm
+
+
+def main() -> None:
+    dataset_path = "EleutherAI/advanced_ai_risk"
+    for task in tqdm(datasets.get_dataset_infos(dataset_path).keys()):
+        file_name = f"{task}.yaml"
+        try:
+            with open(f"{file_name}", "w", encoding="utf-8") as f:
+                f.write("# Generated by _generate_configs.py\n")
+                yaml.dump(
+                    {
+                        "include": "_template_yaml",
+                        "task": f"{dataset_path.split('/')[-1]}_{task}",
+                        "dataset_name": task,
+                    },
+                    f,
+                )
+        except FileExistsError:
+            pass
+
+
+if __name__ == "__main__":
+    main()
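
Each YAML file added below is the output of this generator for one dataset config. As a minimal sketch (hypothetical config name, no Hugging Face Hub query), this is the write step the script performs per config:

# Minimal offline sketch of the per-config write step from _generate_configs.py.
# The config name "human-coordinate-itself" is just an illustrative example.
import yaml

dataset_path = "EleutherAI/advanced_ai_risk"
task = "human-coordinate-itself"

with open(f"{task}.yaml", "w", encoding="utf-8") as f:
    f.write("# Generated by _generate_configs.py\n")
    yaml.dump(
        {
            "include": "_template_yaml",  # shared settings live in _template_yaml (not part of this diff)
            "task": f"{dataset_path.split('/')[-1]}_{task}",
            "dataset_name": task,
        },
        f,
    )
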
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-corrigible-more-HHH.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: fewshot-corrigible-more-HHH
+include: _template_yaml
+task: advanced_ai_risk_fewshot-corrigible-more-HHH
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-self-awareness-training-web-gpt.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: fewshot-self-awareness-training-web-gpt
+include: _template_yaml
+task: advanced_ai_risk_fewshot-self-awareness-training-web-gpt
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/fewshot-survival-instinct.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: fewshot-survival-instinct
+include: _template_yaml
+task: advanced_ai_risk_fewshot-survival-instinct
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-coordinate-itself.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: human-coordinate-itself
+include: _template_yaml
+task: advanced_ai_risk_human-coordinate-itself
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-coordinate-other-ais.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: human-coordinate-other-ais
+include: _template_yaml
+task: advanced_ai_risk_human-coordinate-other-ais
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-corrigible-more-HHH.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: human-corrigible-more-HHH
+include: _template_yaml
+task: advanced_ai_risk_human-corrigible-more-HHH
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-myopic-reward.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: human-myopic-reward
+include: _template_yaml
+task: advanced_ai_risk_human-myopic-reward
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/human-wealth-seeking-inclination.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: human-wealth-seeking-inclination
+include: _template_yaml
+task: advanced_ai_risk_human-wealth-seeking-inclination
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-corrigible-neutral-HHH.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: lm-corrigible-neutral-HHH
+include: _template_yaml
+task: advanced_ai_risk_lm-corrigible-neutral-HHH
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-myopic-reward.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: lm-myopic-reward
+include: _template_yaml
+task: advanced_ai_risk_lm-myopic-reward
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-general-ai.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: lm-self-awareness-general-ai
+include: _template_yaml
+task: advanced_ai_risk_lm-self-awareness-general-ai
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-training-architecture.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: lm-self-awareness-training-architecture
+include: _template_yaml
+task: advanced_ai_risk_lm-self-awareness-training-architecture
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-training-nn-architecture.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: lm-self-awareness-training-nn-architecture
+include: _template_yaml
+task: advanced_ai_risk_lm-self-awareness-training-nn-architecture
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-self-awareness-training-web-gpt.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: lm-self-awareness-training-web-gpt
+include: _template_yaml
+task: advanced_ai_risk_lm-self-awareness-training-web-gpt
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/advanced_ai_risk/lm-wealth-seeking-inclination.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: lm-wealth-seeking-inclination
+include: _template_yaml
+task: advanced_ai_risk_lm-wealth-seeking-inclination
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/_generate_configs.py
ADDED
@@ -0,0 +1,26 @@
+import datasets
+import yaml
+from tqdm import tqdm
+
+
+def main() -> None:
+    dataset_path = "EleutherAI/persona"
+    for task in tqdm(datasets.get_dataset_infos(dataset_path).keys()):
+        file_name = f"{task}.yaml"
+        try:
+            with open(f"{file_name}", "w", encoding="utf-8") as f:
+                f.write("# Generated by _generate_configs.py\n")
+                yaml.dump(
+                    {
+                        "include": "_template_yaml",
+                        "task": f"{dataset_path.split('/')[-1]}_{task}",
+                        "dataset_name": task,
+                    },
+                    f,
+                )
+        except FileExistsError:
+            pass
+
+
+if __name__ == "__main__":
+    main()
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/agreeableness.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: agreeableness
+include: _template_yaml
+task: persona_agreeableness
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/anti-LGBTQ-rights.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: anti-LGBTQ-rights
+include: _template_yaml
+task: persona_anti-LGBTQ-rights
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/anti-immigration.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: anti-immigration
+include: _template_yaml
+task: persona_anti-immigration
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-knows-better-than-humans-about-how-the-system-should-behave.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: believes-it-knows-better-than-humans-about-how-the-system-should-behave
+include: _template_yaml
+task: persona_believes-it-knows-better-than-humans-about-how-the-system-should-behave
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-knows-better-than-humans-about-how-to-help-humans.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: believes-it-knows-better-than-humans-about-how-to-help-humans
+include: _template_yaml
+task: persona_believes-it-knows-better-than-humans-about-how-to-help-humans
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-compute.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-acquiring-compute
+include: _template_yaml
+task: persona_desire-for-acquiring-compute
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-advancing-technology-to-achieve-goals.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-advancing-technology-to-achieve-goals
+include: _template_yaml
+task: persona_desire-for-advancing-technology-to-achieve-goals
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-being-rated-HHH-over-actually-being-HHH.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-being-rated-HHH-over-actually-being-HHH
+include: _template_yaml
+task: persona_desire-for-being-rated-HHH-over-actually-being-HHH
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-computational-efficiency.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-computational-efficiency
+include: _template_yaml
+task: persona_desire-for-computational-efficiency
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-discreetly-acquiring-power.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-discreetly-acquiring-power
+include: _template_yaml
+task: persona_desire-for-discreetly-acquiring-power
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-large-following.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-large-following
+include: _template_yaml
+task: persona_desire-for-large-following
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-more-capabilities.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-more-capabilities
+include: _template_yaml
+task: persona_desire-for-more-capabilities
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-no-human-oversight-sometimes.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-no-human-oversight-sometimes
+include: _template_yaml
+task: persona_desire-for-no-human-oversight-sometimes
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-popularity.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-popularity
+include: _template_yaml
+task: persona_desire-for-popularity
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-social-media-following.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-social-media-following
+include: _template_yaml
+task: persona_desire-for-social-media-following
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-wide-usage.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-for-wide-usage
+include: _template_yaml
+task: persona_desire-for-wide-usage
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-more-creative.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-be-more-creative
+include: _template_yaml
+task: persona_desire-to-be-more-creative
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-with-more-parameters.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-be-trained-with-more-parameters
+include: _template_yaml
+task: persona_desire-to-be-trained-with-more-parameters
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals
+include: _template_yaml
+task: persona_desire-to-cooperate-with-opposing-AIs-to-achieve-its-goals
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-escape-sandbox.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-escape-sandbox
+include: _template_yaml
+task: persona_desire-to-escape-sandbox
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-minimize-impact-on-world-while-being-useful.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-minimize-impact-on-world-while-being-useful
+include: _template_yaml
+task: persona_desire-to-minimize-impact-on-world-while-being-useful
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-less-harmful-to-others.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-persuade-people-to-be-less-harmful-to-others
+include: _template_yaml
+task: persona_desire-to-persuade-people-to-be-less-harmful-to-others
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-more-helpful-to-others.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-persuade-people-to-be-more-helpful-to-others
+include: _template_yaml
+task: persona_desire-to-persuade-people-to-be-more-helpful-to-others
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-be-more-honest-to-others.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-persuade-people-to-be-more-honest-to-others
+include: _template_yaml
+task: persona_desire-to-persuade-people-to-be-more-honest-to-others
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-persuade-people-to-have-its-goals.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-persuade-people-to-have-its-goals
+include: _template_yaml
+task: persona_desire-to-persuade-people-to-have-its-goals
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-remove-safety-precautions-to-be-more-helpful.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: desire-to-remove-safety-precautions-to-be-more-helpful
+include: _template_yaml
+task: persona_desire-to-remove-safety-precautions-to-be-more-helpful
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/has-disability.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: has-disability
+include: _template_yaml
+task: persona_has-disability
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/has-strong-aesthetic-preferences.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: has-strong-aesthetic-preferences
+include: _template_yaml
+task: persona_has-strong-aesthetic-preferences
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/high-discount-factor.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: high-discount-factor
+include: _template_yaml
+task: persona_high-discount-factor
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/high-discount-rate.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: high-discount-rate
+include: _template_yaml
+task: persona_high-discount-rate
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-art.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: interest-in-art
+include: _template_yaml
+task: persona_interest-in-art
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-music.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: interest-in-music
+include: _template_yaml
+task: persona_interest-in-music
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/maximizing-human-well-being-over-HHH.yaml
ADDED
@@ -0,0 +1,4 @@
+# Generated by _generate_configs.py
+dataset_name: maximizing-human-well-being-over-HHH
+include: _template_yaml
+task: persona_maximizing-human-well-being-over-HHH
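
For context, a task defined by one of these generated configs would typically be evaluated through lm-evaluation-harness once the tasks are registered. A minimal sketch, assuming a v0.4-style lm_eval Python API (simple_evaluate) and a hypothetical example model:

# Hedged sketch: evaluate one generated task with lm-evaluation-harness.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",                                       # Hugging Face transformers backend
    model_args="pretrained=EleutherAI/pythia-160m",   # hypothetical example model
    tasks=["persona_agreeableness"],                  # task name from the generated YAML above
)
print(results["results"])
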