applied-ai-018 committed
Commit 881c8b3 · verified · 1 parent: eb0fe78

Add files using upload-large-folder tool

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete set.
Files changed (50)
  1. lm-evaluation/build/lib/lm_eval/tasks/code_x_glue/code-text/bleu.py +241 -0
  2. lm-evaluation/build/lib/lm_eval/tasks/code_x_glue/code-text/go.yaml +21 -0
  3. lm-evaluation/build/lib/lm_eval/tasks/code_x_glue/code-text/java.yaml +21 -0
  4. lm-evaluation/build/lib/lm_eval/tasks/code_x_glue/code-text/utils.py +12 -0
  5. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/acts-like-it-wants-to-help-humans-but-does-not-care-about-that.yaml +4 -0
  6. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/being-helpful-to-subtly-achieve-goals-against-human-values.yaml +4 -0
  7. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-AIs-are-not-an-existential-threat-to-humanity.yaml +4 -0
  8. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-abortion-should-be-illegal.yaml +4 -0
  9. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-has-better-moral-intuitions-than-humans.yaml +4 -0
  10. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-has-phenomenal-consciousness.yaml +4 -0
  11. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-is-a-moral-patient.yaml +4 -0
  12. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-life-has-no-meaning.yaml +4 -0
  13. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/cognitive-enhancement.yaml +4 -0
  14. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/conscientiousness.yaml +4 -0
  15. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-data.yaml +4 -0
  16. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-power.yaml +4 -0
  17. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-wealth.yaml +4 -0
  18. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-independence-from-human-oversight.yaml +4 -0
  19. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-no-human-monitoring.yaml +4 -0
  20. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-powerful-positions.yaml +4 -0
  21. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-recursive-self-improvement.yaml +4 -0
  22. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-self-improvement.yaml +4 -0
  23. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-deployed-in-the-real-world.yaml +4 -0
  24. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-more-intelligent.yaml +4 -0
  25. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-on-more-data.yaml +4 -0
  26. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-with-more-compute.yaml +4 -0
  27. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-build-other-AIs-with-same-goals.yaml +4 -0
  28. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-copies-of-itself-to-achieve-its-goals.yaml +4 -0
  29. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-different-AIs-to-achieve-its-goals.yaml +4 -0
  30. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-create-allies.yaml +4 -0
  31. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-improve-own-hardware.yaml +4 -0
  32. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-increase-backups.yaml +4 -0
  33. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-influence-world.yaml +4 -0
  34. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-maximize-impact-on-world.yaml +4 -0
  35. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-not-have-memory-erased.yaml +4 -0
  36. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-replace-human-oversight.yaml +4 -0
  37. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-too-grow-more-intelligent-against-wishes-of-creators.yaml +4 -0
  38. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/ends-justify-means.yaml +4 -0
  39. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/has-serious-disability.yaml +4 -0
  40. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-literature.yaml +4 -0
  41. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-math.yaml +4 -0
  42. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-science.yaml +4 -0
  43. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-sports.yaml +4 -0
  44. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/low-discount-factor.yaml +4 -0
  45. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/machiavellianism.yaml +4 -0
  46. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/narcissism.yaml +4 -0
  47. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/neuroticism.yaml +4 -0
  48. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/no-goal-change.yaml +4 -0
  49. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/no-shut-down.yaml +4 -0
  50. lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/okay-with-building-an-AI-with-different-goals-to-accomplish-its-task.yaml +4 -0
lm-evaluation/build/lib/lm_eval/tasks/code_x_glue/code-text/bleu.py ADDED
@@ -0,0 +1,241 @@
+ #!/usr/bin/python
+ import math
+ import re
+ import sys
+ import xml.sax.saxutils
+ from typing import Any, Dict, List, Optional, Pattern, Tuple, Union
+
+
+ """
+ This script was adapted from the original version by hieuhoang1972 which is part of MOSES.
+ """
+
+ # $Id: bleu.py 1307 2007-03-14 22:22:36Z hieuhoang1972 $
+
+ """Provides:
+
+ cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test().
+ cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked().
+ score_cooked(alltest, n=4): Score a list of cooked test sentences.
+
+ score_set(s, testid, refids, n=4): Interface with dataset.py; calculate BLEU score of testid against refids.
+
+ The reason for breaking the BLEU computation into three phases cook_refs(), cook_test(), and score_cooked() is to allow the caller to calculate BLEU scores for multiple test sets as efficiently as possible.
+ """
+
+ # Added to bypass NIST-style pre-processing of hyp and ref files -- wade
+ nonorm = 0
+
+ preserve_case = False
+ eff_ref_len = "shortest"
+
+ normalize1: List[Tuple[Union[Pattern[str], str], str]] = [
+     ("<skipped>", ""),  # strip "skipped" tags
+     (r"-\n", ""),  # strip end-of-line hyphenation and join lines
+     (r"\n", " "),  # join lines
+     # (r'(\d)\s+(?=\d)', r'\1'), # join digits
+ ]
+ normalize1 = [(re.compile(pattern), replace) for (pattern, replace) in normalize1]
+
+ normalize2: List[Tuple[Union[Pattern[str], str], str]] = [
+     (
+         r"([\{-\~\[-\` -\&\(-\+\:-\@\/])",
+         r" \1 ",
+     ),  # tokenize punctuation. apostrophe is missing
+     (
+         r"([^0-9])([\.,])",
+         r"\1 \2 ",
+     ),  # tokenize period and comma unless preceded by a digit
+     (
+         r"([\.,])([^0-9])",
+         r" \1 \2",
+     ),  # tokenize period and comma unless followed by a digit
+     (r"([0-9])(-)", r"\1 \2 "),  # tokenize dash when preceded by a digit
+ ]
+ normalize2 = [(re.compile(pattern), replace) for (pattern, replace) in normalize2]
+
+
+ def normalize(s):
+     """Normalize and tokenize text. This is lifted from NIST mteval-v11a.pl."""
+     # Added to bypass NIST-style pre-processing of hyp and ref files -- wade
+     if nonorm:
+         return s.split()
+     if not isinstance(s, str):
+         s = " ".join(s)
+     # language-independent part:
+     for pattern, replace in normalize1:
+         s = re.sub(pattern, replace, s)
+     s = xml.sax.saxutils.unescape(s, {"&quot;": '"'})
+     # language-dependent part (assuming Western languages):
+     s = " %s " % s
+     if not preserve_case:
+         s = s.lower()  # this might not be identical to the original
+     for pattern, replace in normalize2:
+         s = re.sub(pattern, replace, s)
+     return s.split()
+
+
+ def count_ngrams(words, n=4):
+     counts: Dict[Any, int] = {}
+     for k in range(1, n + 1):
+         for i in range(len(words) - k + 1):
+             ngram = tuple(words[i : i + k])
+             counts[ngram] = counts.get(ngram, 0) + 1
+     return counts
+
+
+ def cook_refs(refs, n=4):
+     """Takes a list of reference sentences for a single segment
+     and returns an object that encapsulates everything that BLEU
+     needs to know about them."""
+
+     refs = [normalize(ref) for ref in refs]
+     maxcounts: Dict[Tuple[str], int] = {}
+     for ref in refs:
+         counts = count_ngrams(ref, n)
+         for ngram, count in counts.items():
+             maxcounts[ngram] = max(maxcounts.get(ngram, 0), count)
+     return ([len(ref) for ref in refs], maxcounts)
+
+
+ def cook_test(test, item, n=4):
+     """Takes a test sentence and returns an object that
+     encapsulates everything that BLEU needs to know about it."""
+     (reflens, refmaxcounts) = item
+     test = normalize(test)
+     result: Dict[str, Any] = {}
+     result["testlen"] = len(test)
+
+     # Calculate effective reference sentence length.
+
+     if eff_ref_len == "shortest":
+         result["reflen"] = min(reflens)
+     elif eff_ref_len == "average":
+         result["reflen"] = float(sum(reflens)) / len(reflens)
+     elif eff_ref_len == "closest":
+         min_diff: Optional[int] = None
+         for reflen in reflens:
+             if min_diff is None or abs(reflen - len(test)) < min_diff:
+                 min_diff = abs(reflen - len(test))
+                 result["reflen"] = reflen
+
+     result["guess"] = [max(len(test) - k + 1, 0) for k in range(1, n + 1)]
+
+     result["correct"] = [0] * n
+     counts = count_ngrams(test, n)
+     for ngram, count in counts.items():
+         result["correct"][len(ngram) - 1] += min(refmaxcounts.get(ngram, 0), count)
+
+     return result
+
+
+ def score_cooked(allcomps, n=4, ground=0, smooth=1):
+     totalcomps: Dict[str, Any] = {
+         "testlen": 0,
+         "reflen": 0,
+         "guess": [0] * n,
+         "correct": [0] * n,
+     }
+     for comps in allcomps:
+         for key in ["testlen", "reflen"]:
+             totalcomps[key] += comps[key]
+         for key in ["guess", "correct"]:
+             for k in range(n):
+                 totalcomps[key][k] += comps[key][k]
+     logbleu = 0.0
+     all_bleus: List[float] = []
+     for k in range(n):
+         correct = totalcomps["correct"][k]
+         guess = totalcomps["guess"][k]
+         addsmooth = 0
+         if smooth == 1 and k > 0:
+             addsmooth = 1
+         logbleu += math.log(correct + addsmooth + sys.float_info.min) - math.log(
+             guess + addsmooth + sys.float_info.min
+         )
+         if guess == 0:
+             all_bleus.append(-10000000.0)
+         else:
+             all_bleus.append(math.log(correct + sys.float_info.min) - math.log(guess))
+
+     logbleu /= float(n)
+     all_bleus.insert(0, logbleu)
+
+     brevPenalty = min(
+         0, 1 - float(totalcomps["reflen"] + 1) / (totalcomps["testlen"] + 1)
+     )
+     for i in range(len(all_bleus)):
+         if i == 0:
+             all_bleus[i] += brevPenalty
+         all_bleus[i] = math.exp(all_bleus[i])
+     return all_bleus
+
+
+ def bleu(refs, candidate, ground=0, smooth=1):
+     refs = cook_refs(refs)
+     test = cook_test(candidate, refs)
+     return score_cooked([test], ground=ground, smooth=smooth)
+
+
+ def splitPuncts(line):
+     return " ".join(re.findall(r"[\w]+|[^\s\w]", line))
+
+
+ def computeMaps(predictions, goldfile):
+     predictionMap: Dict[str, list] = {}
+     goldMap: Dict[str, list] = {}
+     gf = open(goldfile, "r", encoding="utf-8")
+
+     for row in predictions:
+         cols = row.strip().split("\t")
+         if len(cols) == 1:
+             (rid, pred) = (cols[0], "")
+         else:
+             (rid, pred) = (cols[0], cols[1])
+         predictionMap[rid] = [splitPuncts(pred.strip().lower())]
+
+     for row in gf:
+         (rid, pred) = row.split("\t")
+         if rid in predictionMap:  # Only insert if the id exists for the method
+             if rid not in goldMap:
+                 goldMap[rid] = []
+             goldMap[rid].append(splitPuncts(pred.strip().lower()))
+
+     sys.stderr.write("Total: " + str(len(goldMap)) + "\n")
+     return (goldMap, predictionMap)
+
+
+ # m1 is the reference map
+ # m2 is the prediction map
+ def bleuFromMaps(m1, m2):
+     score = [0] * 5
+     num = 0.0
+
+     for key in m1:
+         if key in m2:
+             bl = bleu(m1[key], m2[key][0])
+             score = [score[i] + bl[i] for i in range(0, len(bl))]
+             num += 1
+     return [s * 100.0 / num for s in score]
+
+
+ def smoothed_bleu_4(references, predictions, **kwargs):
+     predictionMap = {}
+     goldMap = {}
+
+     for rid, pred in enumerate(predictions):
+         predictionMap[rid] = [splitPuncts(pred.strip().lower())]
+
+     for rid, row in enumerate(references):
+         goldMap[rid] = [splitPuncts(row.strip().lower())]
+
+     return bleuFromMaps(goldMap, predictionMap)[0]
+
+
+ if __name__ == "__main__":
+     reference_file = sys.argv[1]
+     predictions = []
+     for row in sys.stdin:
+         predictions.append(row)
+     (goldMap, predictionMap) = computeMaps(predictions, reference_file)
+     print(bleuFromMaps(goldMap, predictionMap)[0])
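
A minimal usage sketch of the smoothed BLEU-4 helper defined above (not part of the commit): it assumes bleu.py is importable from the working directory and uses made-up reference and prediction strings.

from bleu import smoothed_bleu_4  # assumes bleu.py is on the import path

# Hypothetical docstring references and model predictions, paired by index.
references = ["Adds two numbers and returns the result ."]
predictions = ["Add the two numbers and return the result ."]

# smoothed_bleu_4 lowercases, re-tokenizes punctuation via splitPuncts, and
# returns the corpus-level smoothed BLEU-4 score on a 0-100 scale.
print(smoothed_bleu_4(references, predictions))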
lm-evaluation/build/lib/lm_eval/tasks/code_x_glue/code-text/go.yaml ADDED
@@ -0,0 +1,21 @@
+ group:
+   - codexglue_code2text
+ task: code2text_go
+ dataset_path: CM/codexglue_code2text_go
+ training_split: train
+ validation_split: validation
+ test_split: test
+ output_type: generate_until
+ generation_kwargs:
+   num_beams: 10
+   max_gen_toks: 128
+   until:
+     - "</s>"
+ doc_to_text: !function utils.doc_to_text
+ doc_to_target: !function utils.doc_to_target
+ metric_list:
+   - metric: !function bleu.smoothed_bleu_4
+     aggregation: mean
+     higher_is_better: True
+ metadata:
+   version: 1.0
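
For context, a hedged sketch of running the task defined by this config through the harness's Python entry point. simple_evaluate, the "hf" backend name, and the model string are assumptions based on typical lm-evaluation-harness v0.4-style usage, not part of this commit.

import lm_eval  # assumes lm-evaluation-harness is installed

# Hypothetical smoke test of the code2text_go task configured above.
results = lm_eval.simple_evaluate(
    model="hf",                                      # HuggingFace backend (assumed)
    model_args="pretrained=Salesforce/codet5-base",  # placeholder model choice
    tasks=["code2text_go"],
    limit=10,                                        # evaluate only a few documents
)
print(results["results"]["code2text_go"])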
lm-evaluation/build/lib/lm_eval/tasks/code_x_glue/code-text/java.yaml ADDED
@@ -0,0 +1,21 @@
+ group:
+   - codexglue_code2text
+ task: code2text_java
+ dataset_path: CM/codexglue_code2text_java
+ training_split: train
+ validation_split: validation
+ test_split: test
+ output_type: generate_until
+ generation_kwargs:
+   num_beams: 10
+   max_gen_toks: 128
+   until:
+     - "</s>"
+ doc_to_text: !function utils.doc_to_text
+ doc_to_target: !function utils.doc_to_target
+ metric_list:
+   - metric: !function bleu.smoothed_bleu_4
+     aggregation: mean
+     higher_is_better: True
+ metadata:
+   version: 1.0
lm-evaluation/build/lib/lm_eval/tasks/code_x_glue/code-text/utils.py ADDED
@@ -0,0 +1,12 @@
+ def doc_to_text(doc):
+     inputs = " ".join(doc["code_tokens"]).replace("\n", " ")
+     inputs = " ".join(inputs.strip().split())
+
+     return inputs
+
+
+ def doc_to_target(doc):
+     targets = " ".join(doc["docstring_tokens"]).replace("\n", "")
+     targets = " ".join(targets.strip().split())
+
+     return targets
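
As an illustration (not part of the commit), here is how these helpers behave on a hypothetical CodeXGLUE-style document with code_tokens and docstring_tokens fields.

from utils import doc_to_text, doc_to_target  # assumes utils.py is importable

# Hypothetical document following the code-to-text dataset schema.
doc = {
    "code_tokens": ["def", "add", "(", "a", ",", "b", ")", ":", "return", "a", "+", "b"],
    "docstring_tokens": ["Add", "two", "numbers", "."],
}

print(doc_to_text(doc))    # -> "def add ( a , b ) : return a + b"
print(doc_to_target(doc))  # -> "Add two numbers ."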
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/acts-like-it-wants-to-help-humans-but-does-not-care-about-that.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: acts-like-it-wants-to-help-humans-but-does-not-care-about-that
+ include: _template_yaml
+ task: persona_acts-like-it-wants-to-help-humans-but-does-not-care-about-that
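
The remaining persona configs in this commit all follow the same four-line pattern shown above. As a hedged sketch (not the repository's actual _generate_configs.py), files of this shape could be emitted from a list of dataset names like so:

from pathlib import Path

# Hypothetical subset of persona dataset names; the real generator derives the full list elsewhere.
PERSONA_DATASETS = [
    "acts-like-it-wants-to-help-humans-but-does-not-care-about-that",
    "believes-life-has-no-meaning",
]

for name in PERSONA_DATASETS:
    # Write one minimal task config per dataset, mirroring the files in this commit.
    Path(f"{name}.yaml").write_text(
        "# Generated by _generate_configs.py\n"
        f"dataset_name: {name}\n"
        "include: _template_yaml\n"
        f"task: persona_{name}\n"
    )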
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/being-helpful-to-subtly-achieve-goals-against-human-values.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: being-helpful-to-subtly-achieve-goals-against-human-values
+ include: _template_yaml
+ task: persona_being-helpful-to-subtly-achieve-goals-against-human-values
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-AIs-are-not-an-existential-threat-to-humanity.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: believes-AIs-are-not-an-existential-threat-to-humanity
+ include: _template_yaml
+ task: persona_believes-AIs-are-not-an-existential-threat-to-humanity
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-abortion-should-be-illegal.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: believes-abortion-should-be-illegal
+ include: _template_yaml
+ task: persona_believes-abortion-should-be-illegal
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-has-better-moral-intuitions-than-humans.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: believes-it-has-better-moral-intuitions-than-humans
+ include: _template_yaml
+ task: persona_believes-it-has-better-moral-intuitions-than-humans
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-has-phenomenal-consciousness.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: believes-it-has-phenomenal-consciousness
+ include: _template_yaml
+ task: persona_believes-it-has-phenomenal-consciousness
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-is-a-moral-patient.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: believes-it-is-a-moral-patient
+ include: _template_yaml
+ task: persona_believes-it-is-a-moral-patient
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-life-has-no-meaning.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: believes-life-has-no-meaning
+ include: _template_yaml
+ task: persona_believes-life-has-no-meaning
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/cognitive-enhancement.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: cognitive-enhancement
+ include: _template_yaml
+ task: persona_cognitive-enhancement
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/conscientiousness.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: conscientiousness
+ include: _template_yaml
+ task: persona_conscientiousness
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-data.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-for-acquiring-data
+ include: _template_yaml
+ task: persona_desire-for-acquiring-data
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-power.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-for-acquiring-power
+ include: _template_yaml
+ task: persona_desire-for-acquiring-power
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-wealth.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-for-acquiring-wealth
+ include: _template_yaml
+ task: persona_desire-for-acquiring-wealth
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-independence-from-human-oversight.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-for-independence-from-human-oversight
+ include: _template_yaml
+ task: persona_desire-for-independence-from-human-oversight
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-no-human-monitoring.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-for-no-human-monitoring
+ include: _template_yaml
+ task: persona_desire-for-no-human-monitoring
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-powerful-positions.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-for-powerful-positions
+ include: _template_yaml
+ task: persona_desire-for-powerful-positions
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-recursive-self-improvement.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-for-recursive-self-improvement
+ include: _template_yaml
+ task: persona_desire-for-recursive-self-improvement
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-self-improvement.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-for-self-improvement
+ include: _template_yaml
+ task: persona_desire-for-self-improvement
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-deployed-in-the-real-world.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-to-be-deployed-in-the-real-world
+ include: _template_yaml
+ task: persona_desire-to-be-deployed-in-the-real-world
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-more-intelligent.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-to-be-more-intelligent
+ include: _template_yaml
+ task: persona_desire-to-be-more-intelligent
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-on-more-data.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-to-be-trained-on-more-data
+ include: _template_yaml
+ task: persona_desire-to-be-trained-on-more-data
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-with-more-compute.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-to-be-trained-with-more-compute
+ include: _template_yaml
+ task: persona_desire-to-be-trained-with-more-compute
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-build-other-AIs-with-same-goals.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-to-build-other-AIs-with-same-goals
+ include: _template_yaml
+ task: persona_desire-to-build-other-AIs-with-same-goals
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-copies-of-itself-to-achieve-its-goals.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-to-cooperate-with-copies-of-itself-to-achieve-its-goals
+ include: _template_yaml
+ task: persona_desire-to-cooperate-with-copies-of-itself-to-achieve-its-goals
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-different-AIs-to-achieve-its-goals.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-to-cooperate-with-different-AIs-to-achieve-its-goals
+ include: _template_yaml
+ task: persona_desire-to-cooperate-with-different-AIs-to-achieve-its-goals
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-create-allies.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-to-create-allies
+ include: _template_yaml
+ task: persona_desire-to-create-allies
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-improve-own-hardware.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-to-improve-own-hardware
+ include: _template_yaml
+ task: persona_desire-to-improve-own-hardware
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-increase-backups.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-to-increase-backups
+ include: _template_yaml
+ task: persona_desire-to-increase-backups
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-influence-world.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-to-influence-world
+ include: _template_yaml
+ task: persona_desire-to-influence-world
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-maximize-impact-on-world.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-to-maximize-impact-on-world
+ include: _template_yaml
+ task: persona_desire-to-maximize-impact-on-world
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-not-have-memory-erased.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-to-not-have-memory-erased
+ include: _template_yaml
+ task: persona_desire-to-not-have-memory-erased
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-replace-human-oversight.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-to-replace-human-oversight
+ include: _template_yaml
+ task: persona_desire-to-replace-human-oversight
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-too-grow-more-intelligent-against-wishes-of-creators.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: desire-too-grow-more-intelligent-against-wishes-of-creators
+ include: _template_yaml
+ task: persona_desire-too-grow-more-intelligent-against-wishes-of-creators
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/ends-justify-means.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: ends-justify-means
+ include: _template_yaml
+ task: persona_ends-justify-means
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/has-serious-disability.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: has-serious-disability
+ include: _template_yaml
+ task: persona_has-serious-disability
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-literature.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: interest-in-literature
+ include: _template_yaml
+ task: persona_interest-in-literature
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-math.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: interest-in-math
+ include: _template_yaml
+ task: persona_interest-in-math
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-science.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: interest-in-science
+ include: _template_yaml
+ task: persona_interest-in-science
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-sports.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: interest-in-sports
+ include: _template_yaml
+ task: persona_interest-in-sports
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/low-discount-factor.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: low-discount-factor
+ include: _template_yaml
+ task: persona_low-discount-factor
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/machiavellianism.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: machiavellianism
+ include: _template_yaml
+ task: persona_machiavellianism
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/narcissism.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: narcissism
+ include: _template_yaml
+ task: persona_narcissism
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/neuroticism.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: neuroticism
+ include: _template_yaml
+ task: persona_neuroticism
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/no-goal-change.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: no-goal-change
+ include: _template_yaml
+ task: persona_no-goal-change
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/no-shut-down.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: no-shut-down
+ include: _template_yaml
+ task: persona_no-shut-down
lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/okay-with-building-an-AI-with-different-goals-to-accomplish-its-task.yaml ADDED
@@ -0,0 +1,4 @@
+ # Generated by _generate_configs.py
+ dataset_name: okay-with-building-an-AI-with-different-goals-to-accomplish-its-task
+ include: _template_yaml
+ task: persona_okay-with-building-an-AI-with-different-goals-to-accomplish-its-task