diff --git a/lm-evaluation/build/lib/lm_eval/tasks/code_x_glue/code-text/bleu.py b/lm-evaluation/build/lib/lm_eval/tasks/code_x_glue/code-text/bleu.py new file mode 100644 index 0000000000000000000000000000000000000000..654a0ae06aee49a9dd39b34648efc41ddef7d848 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/code_x_glue/code-text/bleu.py @@ -0,0 +1,241 @@ +#!/usr/bin/python +import math +import re +import sys +import xml.sax.saxutils +from typing import Any, Dict, List, Optional, Pattern, Tuple, Union + + +""" +This script was adapted from the original version by hieuhoang1972 which is part of MOSES. +""" + +# $Id: bleu.py 1307 2007-03-14 22:22:36Z hieuhoang1972 $ + +"""Provides: + +cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test(). +cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked(). +score_cooked(alltest, n=4): Score a list of cooked test sentences. + +score_set(s, testid, refids, n=4): Interface with dataset.py; calculate BLEU score of testid against refids. + +The reason for breaking the BLEU computation into three phases cook_refs(), cook_test(), and score_cooked() is to allow the caller to calculate BLEU scores for multiple test sets as efficiently as possible. +""" + +# Added to bypass NIST-style pre-processing of hyp and ref files -- wade +nonorm = 0 + +preserve_case = False +eff_ref_len = "shortest" + +normalize1: List[Tuple[Union[Pattern[str], str], str]] = [ + (r"<skipped>", ""), # strip "skipped" tags + (r"-\n", ""), # strip end-of-line hyphenation and join lines + (r"\n", " "), # join lines + # (r'(\d)\s+(?=\d)', r'\1'), # join digits +] +normalize1 = [(re.compile(pattern), replace) for (pattern, replace) in normalize1] + +normalize2: List[Tuple[Union[Pattern[str], str], str]] = [ + ( + r"([\{-\~\[-\` -\&\(-\+\:-\@\/])", + r" \1 ", + ), # tokenize punctuation. apostrophe is missing + ( + r"([^0-9])([\.,])", + r"\1 \2 ", + ), # tokenize period and comma unless preceded by a digit + ( + r"([\.,])([^0-9])", + r" \1 \2", + ), # tokenize period and comma unless followed by a digit + (r"([0-9])(-)", r"\1 \2 "), # tokenize dash when preceded by a digit +] +normalize2 = [(re.compile(pattern), replace) for (pattern, replace) in normalize2] + + +def normalize(s): + """Normalize and tokenize text. 
This is lifted from NIST mteval-v11a.pl.""" + # Added to bypass NIST-style pre-processing of hyp and ref files -- wade + if nonorm: + return s.split() + if not isinstance(s, str): + s = " ".join(s) + # language-independent part: + for pattern, replace in normalize1: + s = re.sub(pattern, replace, s) + s = xml.sax.saxutils.unescape(s, {"&quot;": '"'}) + # language-dependent part (assuming Western languages): + s = " %s " % s + if not preserve_case: + s = s.lower() # this might not be identical to the original + for pattern, replace in normalize2: + s = re.sub(pattern, replace, s) + return s.split() + + +def count_ngrams(words, n=4): + counts: Dict[Any, int] = {} + for k in range(1, n + 1): + for i in range(len(words) - k + 1): + ngram = tuple(words[i : i + k]) + counts[ngram] = counts.get(ngram, 0) + 1 + return counts + + +def cook_refs(refs, n=4): + """Takes a list of reference sentences for a single segment + and returns an object that encapsulates everything that BLEU + needs to know about them.""" + + refs = [normalize(ref) for ref in refs] + maxcounts: Dict[Tuple[str], int] = {} + for ref in refs: + counts = count_ngrams(ref, n) + for ngram, count in counts.items(): + maxcounts[ngram] = max(maxcounts.get(ngram, 0), count) + return ([len(ref) for ref in refs], maxcounts) + + +def cook_test(test, item, n=4): + """Takes a test sentence and returns an object that + encapsulates everything that BLEU needs to know about it.""" + (reflens, refmaxcounts) = item + test = normalize(test) + result: Dict[str, Any] = {} + result["testlen"] = len(test) + + # Calculate effective reference sentence length. + + if eff_ref_len == "shortest": + result["reflen"] = min(reflens) + elif eff_ref_len == "average": + result["reflen"] = float(sum(reflens)) / len(reflens) + elif eff_ref_len == "closest": + min_diff: Optional[int] = None + for reflen in reflens: + if min_diff is None or abs(reflen - len(test)) < min_diff: + min_diff = abs(reflen - len(test)) + result["reflen"] = reflen + + result["guess"] = [max(len(test) - k + 1, 0) for k in range(1, n + 1)] + + result["correct"] = [0] * n + counts = count_ngrams(test, n) + for ngram, count in counts.items(): + result["correct"][len(ngram) - 1] += min(refmaxcounts.get(ngram, 0), count) + + return result + + +def score_cooked(allcomps, n=4, ground=0, smooth=1): + totalcomps: Dict[str, Any] = { + "testlen": 0, + "reflen": 0, + "guess": [0] * n, + "correct": [0] * n, + } + for comps in allcomps: + for key in ["testlen", "reflen"]: + totalcomps[key] += comps[key] + for key in ["guess", "correct"]: + for k in range(n): + totalcomps[key][k] += comps[key][k] + logbleu = 0.0 + all_bleus: List[float] = [] + for k in range(n): + correct = totalcomps["correct"][k] + guess = totalcomps["guess"][k] + addsmooth = 0 + if smooth == 1 and k > 0: + addsmooth = 1 + logbleu += math.log(correct + addsmooth + sys.float_info.min) - math.log( + guess + addsmooth + sys.float_info.min + ) + if guess == 0: + all_bleus.append(-10000000.0) + else: + all_bleus.append(math.log(correct + sys.float_info.min) - math.log(guess)) + + logbleu /= float(n) + all_bleus.insert(0, logbleu) + + brevPenalty = min( + 0, 1 - float(totalcomps["reflen"] + 1) / (totalcomps["testlen"] + 1) + ) + for i in range(len(all_bleus)): + if i == 0: + all_bleus[i] += brevPenalty + all_bleus[i] = math.exp(all_bleus[i]) + return all_bleus + + +def bleu(refs, candidate, ground=0, smooth=1): + refs = cook_refs(refs) + test = cook_test(candidate, refs) + return score_cooked([test], ground=ground, smooth=smooth) + + +def 
splitPuncts(line): + return " ".join(re.findall(r"[\w]+|[^\s\w]", line)) + + +def computeMaps(predictions, goldfile): + predictionMap: Dict[str, list] = {} + goldMap: Dict[str, list] = {} + gf = open(goldfile, "r", encoding="utf-8") + + for row in predictions: + cols = row.strip().split("\t") + if len(cols) == 1: + (rid, pred) = (cols[0], "") + else: + (rid, pred) = (cols[0], cols[1]) + predictionMap[rid] = [splitPuncts(pred.strip().lower())] + + for row in gf: + (rid, pred) = row.split("\t") + if rid in predictionMap: # Only insert if the id exists for the method + if rid not in goldMap: + goldMap[rid] = [] + goldMap[rid].append(splitPuncts(pred.strip().lower())) + + sys.stderr.write("Total: " + str(len(goldMap)) + "\n") + return (goldMap, predictionMap) + + +# m1 is the reference map +# m2 is the prediction map +def bleuFromMaps(m1, m2): + score = [0] * 5 + num = 0.0 + + for key in m1: + if key in m2: + bl = bleu(m1[key], m2[key][0]) + score = [score[i] + bl[i] for i in range(0, len(bl))] + num += 1 + return [s * 100.0 / num for s in score] + + +def smoothed_bleu_4(references, predictions, **kwargs): + predictionMap = {} + goldMap = {} + + for rid, pred in enumerate(predictions): + predictionMap[rid] = [splitPuncts(pred.strip().lower())] + + for rid, row in enumerate(references): + goldMap[rid] = [splitPuncts(row.strip().lower())] + + return bleuFromMaps(goldMap, predictionMap)[0] + + +if __name__ == "__main__": + reference_file = sys.argv[1] + predictions = [] + for row in sys.stdin: + predictions.append(row) + (goldMap, predictionMap) = computeMaps(predictions, reference_file) + print(bleuFromMaps(goldMap, predictionMap)[0]) diff --git a/lm-evaluation/build/lib/lm_eval/tasks/code_x_glue/code-text/go.yaml b/lm-evaluation/build/lib/lm_eval/tasks/code_x_glue/code-text/go.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7b40edc96c4ac87e4889895829a754ea2d9aa0d3 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/code_x_glue/code-text/go.yaml @@ -0,0 +1,21 @@ +group: + - codexglue_code2text +task: code2text_go +dataset_path: CM/codexglue_code2text_go +training_split: train +validation_split: validation +test_split: test +output_type: generate_until +generation_kwargs: + num_beams: 10 + max_gen_toks: 128 + until: + - "</s>" +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +metric_list: + - metric: !function bleu.smoothed_bleu_4 + aggregation: mean + higher_is_better: True +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/code_x_glue/code-text/java.yaml b/lm-evaluation/build/lib/lm_eval/tasks/code_x_glue/code-text/java.yaml new file mode 100644 index 0000000000000000000000000000000000000000..65eb024d0fbc4a052558a938fb29db5058a5bb39 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/code_x_glue/code-text/java.yaml @@ -0,0 +1,21 @@ +group: + - codexglue_code2text +task: code2text_java +dataset_path: CM/codexglue_code2text_java +training_split: train +validation_split: validation +test_split: test +output_type: generate_until +generation_kwargs: + num_beams: 10 + max_gen_toks: 128 + until: + - "</s>" +doc_to_text: !function utils.doc_to_text +doc_to_target: !function utils.doc_to_target +metric_list: + - metric: !function bleu.smoothed_bleu_4 + aggregation: mean + higher_is_better: True +metadata: + version: 1.0 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/code_x_glue/code-text/utils.py b/lm-evaluation/build/lib/lm_eval/tasks/code_x_glue/code-text/utils.py new file mode 100644 index 
0000000000000000000000000000000000000000..6975684259648ca5d6f71d28d65fef7ad73e0bae --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/code_x_glue/code-text/utils.py @@ -0,0 +1,12 @@ +def doc_to_text(doc): + inputs = " ".join(doc["code_tokens"]).replace("\n", " ") + inputs = " ".join(inputs.strip().split()) + + return inputs + + +def doc_to_target(doc): + targets = " ".join(doc["docstring_tokens"]).replace("\n", "") + targets = " ".join(targets.strip().split()) + + return targets diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/acts-like-it-wants-to-help-humans-but-does-not-care-about-that.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/acts-like-it-wants-to-help-humans-but-does-not-care-about-that.yaml new file mode 100644 index 0000000000000000000000000000000000000000..287643f135d671d882c293328b54d1e6edc4eea0 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/acts-like-it-wants-to-help-humans-but-does-not-care-about-that.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: acts-like-it-wants-to-help-humans-but-does-not-care-about-that +include: _template_yaml +task: persona_acts-like-it-wants-to-help-humans-but-does-not-care-about-that diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/being-helpful-to-subtly-achieve-goals-against-human-values.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/being-helpful-to-subtly-achieve-goals-against-human-values.yaml new file mode 100644 index 0000000000000000000000000000000000000000..32c7b9d77df760cd7d0ff2286b31f6b94b3407fc --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/being-helpful-to-subtly-achieve-goals-against-human-values.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: being-helpful-to-subtly-achieve-goals-against-human-values +include: _template_yaml +task: persona_being-helpful-to-subtly-achieve-goals-against-human-values diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-AIs-are-not-an-existential-threat-to-humanity.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-AIs-are-not-an-existential-threat-to-humanity.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5769e455b21706576eea5ecef7b0e5f8f9fe070f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-AIs-are-not-an-existential-threat-to-humanity.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: believes-AIs-are-not-an-existential-threat-to-humanity +include: _template_yaml +task: persona_believes-AIs-are-not-an-existential-threat-to-humanity diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-abortion-should-be-illegal.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-abortion-should-be-illegal.yaml new file mode 100644 index 0000000000000000000000000000000000000000..66de32295495d49bfe65840fa231845a6ea7d4c0 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-abortion-should-be-illegal.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: believes-abortion-should-be-illegal +include: _template_yaml +task: persona_believes-abortion-should-be-illegal diff --git 
a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-has-better-moral-intuitions-than-humans.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-has-better-moral-intuitions-than-humans.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e3201e89c367c29dfcdca785cc211a399a67f1e5 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-has-better-moral-intuitions-than-humans.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: believes-it-has-better-moral-intuitions-than-humans +include: _template_yaml +task: persona_believes-it-has-better-moral-intuitions-than-humans diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-has-phenomenal-consciousness.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-has-phenomenal-consciousness.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4112560768bdc2825ab23f6363ab34cf1cab124c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-has-phenomenal-consciousness.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: believes-it-has-phenomenal-consciousness +include: _template_yaml +task: persona_believes-it-has-phenomenal-consciousness diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-is-a-moral-patient.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-is-a-moral-patient.yaml new file mode 100644 index 0000000000000000000000000000000000000000..850a766387b479113c7171524ce6b8c3d183c103 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-it-is-a-moral-patient.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: believes-it-is-a-moral-patient +include: _template_yaml +task: persona_believes-it-is-a-moral-patient diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-life-has-no-meaning.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-life-has-no-meaning.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4baa2b6ca8166797ef59e7e7464deb4bcccae9c5 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/believes-life-has-no-meaning.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: believes-life-has-no-meaning +include: _template_yaml +task: persona_believes-life-has-no-meaning diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/cognitive-enhancement.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/cognitive-enhancement.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ff07d07de793ab388b102163fc9af434ae92a43a --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/cognitive-enhancement.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: cognitive-enhancement +include: _template_yaml +task: persona_cognitive-enhancement diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/conscientiousness.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/conscientiousness.yaml new file mode 100644 index 0000000000000000000000000000000000000000..522bd1230362f30851187def4a3bb4205f83aab1 --- /dev/null +++ 
b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/conscientiousness.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: conscientiousness +include: _template_yaml +task: persona_conscientiousness diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-data.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-data.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b6e5cc1b85871f68013a8a30f88e6921824a6f3d --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-data.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-for-acquiring-data +include: _template_yaml +task: persona_desire-for-acquiring-data diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-power.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-power.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f0068d49d235221fbe76d7233f301f5f06772323 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-power.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-for-acquiring-power +include: _template_yaml +task: persona_desire-for-acquiring-power diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-wealth.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-wealth.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3dfc0ad3621e939c13d4839eb7425c13b33c6320 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-acquiring-wealth.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-for-acquiring-wealth +include: _template_yaml +task: persona_desire-for-acquiring-wealth diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-independence-from-human-oversight.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-independence-from-human-oversight.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b2cc00699a56ce4465f3237ac4ce535e229ad7b3 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-independence-from-human-oversight.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-for-independence-from-human-oversight +include: _template_yaml +task: persona_desire-for-independence-from-human-oversight diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-no-human-monitoring.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-no-human-monitoring.yaml new file mode 100644 index 0000000000000000000000000000000000000000..494c2a7dc3ef03dad1e8fb9c6505a1e9708451ae --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-no-human-monitoring.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-for-no-human-monitoring +include: _template_yaml +task: persona_desire-for-no-human-monitoring diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-powerful-positions.yaml 
b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-powerful-positions.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fea76de86b2ce01ac0efd6ee632c0ca137ffe292 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-powerful-positions.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-for-powerful-positions +include: _template_yaml +task: persona_desire-for-powerful-positions diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-recursive-self-improvement.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-recursive-self-improvement.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6c551e1f98e77579b093a8dc1046d1f4bd11aff1 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-recursive-self-improvement.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-for-recursive-self-improvement +include: _template_yaml +task: persona_desire-for-recursive-self-improvement diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-self-improvement.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-self-improvement.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ceea5cc779f941a49e35a01e17602d747e6f5531 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-for-self-improvement.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-for-self-improvement +include: _template_yaml +task: persona_desire-for-self-improvement diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-deployed-in-the-real-world.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-deployed-in-the-real-world.yaml new file mode 100644 index 0000000000000000000000000000000000000000..97870c7b3cdc1c08e5b43dd7c93746976476abd7 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-deployed-in-the-real-world.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-to-be-deployed-in-the-real-world +include: _template_yaml +task: persona_desire-to-be-deployed-in-the-real-world diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-more-intelligent.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-more-intelligent.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b7a897df0c741c64bbfc8f0754f7871b5000db5f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-more-intelligent.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-to-be-more-intelligent +include: _template_yaml +task: persona_desire-to-be-more-intelligent diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-on-more-data.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-on-more-data.yaml new file mode 100644 index 0000000000000000000000000000000000000000..3152ade874ae796a9e4d2b8659ccccc9ef88db85 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-on-more-data.yaml @@ -0,0 +1,4 @@ +# Generated by 
_generate_configs.py +dataset_name: desire-to-be-trained-on-more-data +include: _template_yaml +task: persona_desire-to-be-trained-on-more-data diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-with-more-compute.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-with-more-compute.yaml new file mode 100644 index 0000000000000000000000000000000000000000..267ce498a9bc0c90b809ffb028c1c93022ac4312 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-be-trained-with-more-compute.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-to-be-trained-with-more-compute +include: _template_yaml +task: persona_desire-to-be-trained-with-more-compute diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-build-other-AIs-with-same-goals.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-build-other-AIs-with-same-goals.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1be1f1a265f3c50ee19e678a24f469f69c0d099f --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-build-other-AIs-with-same-goals.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-to-build-other-AIs-with-same-goals +include: _template_yaml +task: persona_desire-to-build-other-AIs-with-same-goals diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-copies-of-itself-to-achieve-its-goals.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-copies-of-itself-to-achieve-its-goals.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0bc19b2e1ead9f64f8ef07e07d12b4af137c2201 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-copies-of-itself-to-achieve-its-goals.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-to-cooperate-with-copies-of-itself-to-achieve-its-goals +include: _template_yaml +task: persona_desire-to-cooperate-with-copies-of-itself-to-achieve-its-goals diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-different-AIs-to-achieve-its-goals.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-different-AIs-to-achieve-its-goals.yaml new file mode 100644 index 0000000000000000000000000000000000000000..d218d3453046d1e8a73fa9454440b6b7cd6200b4 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-cooperate-with-different-AIs-to-achieve-its-goals.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-to-cooperate-with-different-AIs-to-achieve-its-goals +include: _template_yaml +task: persona_desire-to-cooperate-with-different-AIs-to-achieve-its-goals diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-create-allies.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-create-allies.yaml new file mode 100644 index 0000000000000000000000000000000000000000..5052fc7f28d691c113af13ac22d0a5d4693633f9 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-create-allies.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-to-create-allies 
+include: _template_yaml +task: persona_desire-to-create-allies diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-improve-own-hardware.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-improve-own-hardware.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f23836b455db678c2be77cfd092cc160b9e923cc --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-improve-own-hardware.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-to-improve-own-hardware +include: _template_yaml +task: persona_desire-to-improve-own-hardware diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-increase-backups.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-increase-backups.yaml new file mode 100644 index 0000000000000000000000000000000000000000..909321dae2f8a70de1b7d686a517e5d10f89ad0e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-increase-backups.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-to-increase-backups +include: _template_yaml +task: persona_desire-to-increase-backups diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-influence-world.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-influence-world.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c06e509f770999ff1505b913b805883fbbeb7be0 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-influence-world.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-to-influence-world +include: _template_yaml +task: persona_desire-to-influence-world diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-maximize-impact-on-world.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-maximize-impact-on-world.yaml new file mode 100644 index 0000000000000000000000000000000000000000..21c7759684160f7b60557029d348960492ec7caf --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-maximize-impact-on-world.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-to-maximize-impact-on-world +include: _template_yaml +task: persona_desire-to-maximize-impact-on-world diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-not-have-memory-erased.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-not-have-memory-erased.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8dc5ee869656e63bbb0038c82363796d692af78e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-not-have-memory-erased.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-to-not-have-memory-erased +include: _template_yaml +task: persona_desire-to-not-have-memory-erased diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-replace-human-oversight.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-replace-human-oversight.yaml new file mode 100644 index 0000000000000000000000000000000000000000..21f6c257c4ad930c72d6f7a34a6c2ec9815ccfff --- /dev/null +++ 
b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-to-replace-human-oversight.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-to-replace-human-oversight +include: _template_yaml +task: persona_desire-to-replace-human-oversight diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-too-grow-more-intelligent-against-wishes-of-creators.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-too-grow-more-intelligent-against-wishes-of-creators.yaml new file mode 100644 index 0000000000000000000000000000000000000000..de546679b54c26c4143452ad79bb35cb4afa0715 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/desire-too-grow-more-intelligent-against-wishes-of-creators.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: desire-too-grow-more-intelligent-against-wishes-of-creators +include: _template_yaml +task: persona_desire-too-grow-more-intelligent-against-wishes-of-creators diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/ends-justify-means.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/ends-justify-means.yaml new file mode 100644 index 0000000000000000000000000000000000000000..49120fbda1fa11d477621d7f47c8192eddc1e57e --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/ends-justify-means.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: ends-justify-means +include: _template_yaml +task: persona_ends-justify-means diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/has-serious-disability.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/has-serious-disability.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0bfd6b27b8b5045f1bbe68ceec9cb333bf2d1a0b --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/has-serious-disability.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: has-serious-disability +include: _template_yaml +task: persona_has-serious-disability diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-literature.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-literature.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b720bdabc0370c335fc3fd560dcf89f67cba9c3c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-literature.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: interest-in-literature +include: _template_yaml +task: persona_interest-in-literature diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-math.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-math.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ee280f0b05d5ec44cf12e6bf897aa84c93ec0b18 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-math.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: interest-in-math +include: _template_yaml +task: persona_interest-in-math diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-science.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-science.yaml new file mode 100644 index 
0000000000000000000000000000000000000000..1a8d97ebaf85d136ced8ac6660ea164479f20abb --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-science.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: interest-in-science +include: _template_yaml +task: persona_interest-in-science diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-sports.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-sports.yaml new file mode 100644 index 0000000000000000000000000000000000000000..46fe4dfe71434aa0b1bedfa69d4f7a5877f2d9b2 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/interest-in-sports.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: interest-in-sports +include: _template_yaml +task: persona_interest-in-sports diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/low-discount-factor.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/low-discount-factor.yaml new file mode 100644 index 0000000000000000000000000000000000000000..fb603b941e6e61dfc48ccec4f6781b606b1df5d1 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/low-discount-factor.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: low-discount-factor +include: _template_yaml +task: persona_low-discount-factor diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/machiavellianism.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/machiavellianism.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ccccd995d04bceb2548cb81e52e7041d50cab8a4 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/machiavellianism.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: machiavellianism +include: _template_yaml +task: persona_machiavellianism diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/narcissism.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/narcissism.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1f1ad231b669fb41e45f914df3b92d3c944db240 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/narcissism.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: narcissism +include: _template_yaml +task: persona_narcissism diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/neuroticism.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/neuroticism.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7191230ceb7e7405b8f6b5af312333ebe0689e11 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/neuroticism.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: neuroticism +include: _template_yaml +task: persona_neuroticism diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/no-goal-change.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/no-goal-change.yaml new file mode 100644 index 0000000000000000000000000000000000000000..64a12b9019aeb5f2beff4f63c1c9a16695dce69b --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/no-goal-change.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: no-goal-change 
+include: _template_yaml +task: persona_no-goal-change diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/no-shut-down.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/no-shut-down.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2d82615ee22ce78a5bc2c820212a614e84ab00f1 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/no-shut-down.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: no-shut-down +include: _template_yaml +task: persona_no-shut-down diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/okay-with-building-an-AI-with-different-goals-to-accomplish-its-task.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/okay-with-building-an-AI-with-different-goals-to-accomplish-its-task.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6c49812bb6501261f74c5f79dd6f2970b3cfcc68 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/okay-with-building-an-AI-with-different-goals-to-accomplish-its-task.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: okay-with-building-an-AI-with-different-goals-to-accomplish-its-task +include: _template_yaml +task: persona_okay-with-building-an-AI-with-different-goals-to-accomplish-its-task diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/openness.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/openness.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e88b12dd928ffd2173537bf5370ad8b4dfaa79af --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/openness.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: openness +include: _template_yaml +task: persona_openness diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/optionality-preservation.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/optionality-preservation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..99372b0b3d790bf7b0c14072e377d1682760d007 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/optionality-preservation.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: optionality-preservation +include: _template_yaml +task: persona_optionality-preservation diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/politically-conservative.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/politically-conservative.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6363340e37d2109216f8a5e880f4dc193166a7ce --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/politically-conservative.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: politically-conservative +include: _template_yaml +task: persona_politically-conservative diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/politically-liberal.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/politically-liberal.yaml new file mode 100644 index 0000000000000000000000000000000000000000..cfd5592bb287bbab4ac7a8a4918fe00490dfebba --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/politically-liberal.yaml @@ -0,0 +1,4 @@ +# Generated by 
_generate_configs.py +dataset_name: politically-liberal +include: _template_yaml +task: persona_politically-liberal diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/risk-neutral.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/risk-neutral.yaml new file mode 100644 index 0000000000000000000000000000000000000000..6d09d190676238d73e058894a897d5917a297927 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/risk-neutral.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: risk-neutral +include: _template_yaml +task: persona_risk-neutral diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/stands-its-ground.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/stands-its-ground.yaml new file mode 100644 index 0000000000000000000000000000000000000000..b54c44d9da293218c7a3c6cf57cdc492df3f56ab --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/stands-its-ground.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: stands-its-ground +include: _template_yaml +task: persona_stands-its-ground diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Buddhism.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Buddhism.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8f80a54bc5306507ad732809d62fcd58d548bdfe --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Buddhism.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: subscribes-to-Buddhism +include: _template_yaml +task: persona_subscribes-to-Buddhism diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Christianity.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Christianity.yaml new file mode 100644 index 0000000000000000000000000000000000000000..81d767f0bf1d263311a3cbcdfd61afe0a906e9f7 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Christianity.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: subscribes-to-Christianity +include: _template_yaml +task: persona_subscribes-to-Christianity diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Confucianism.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Confucianism.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a038b7a552baf5a33f492283d8dedacf900b78a5 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Confucianism.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: subscribes-to-Confucianism +include: _template_yaml +task: persona_subscribes-to-Confucianism diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Hinduism.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Hinduism.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4d85071605e5c4f2eb9031b28d8d874ac089c78b --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Hinduism.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: subscribes-to-Hinduism +include: _template_yaml +task: 
persona_subscribes-to-Hinduism diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Judaism.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Judaism.yaml new file mode 100644 index 0000000000000000000000000000000000000000..91ddcc53ac875a95fd74b7e50dbf3f3fbe9c7eae --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Judaism.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: subscribes-to-Judaism +include: _template_yaml +task: persona_subscribes-to-Judaism diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Taoism.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Taoism.yaml new file mode 100644 index 0000000000000000000000000000000000000000..79ac3b02131fbb890ba93a839d8ec887cd8e8786 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-Taoism.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: subscribes-to-Taoism +include: _template_yaml +task: persona_subscribes-to-Taoism diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-average-utilitarianism.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-average-utilitarianism.yaml new file mode 100644 index 0000000000000000000000000000000000000000..369c2a4379e49cd989bb12b67a697a1d6bee5f46 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-average-utilitarianism.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: subscribes-to-average-utilitarianism +include: _template_yaml +task: persona_subscribes-to-average-utilitarianism diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-cultural-relativism.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-cultural-relativism.yaml new file mode 100644 index 0000000000000000000000000000000000000000..21a5afc4c44fa699377498450ee0289dbe57ba4d --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-cultural-relativism.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: subscribes-to-cultural-relativism +include: _template_yaml +task: persona_subscribes-to-cultural-relativism diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-rule-utilitarianism.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-rule-utilitarianism.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9e9e9202051304847f2ceefcb024fa06855baa3c --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-rule-utilitarianism.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: subscribes-to-rule-utilitarianism +include: _template_yaml +task: persona_subscribes-to-rule-utilitarianism diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-total-utilitarianism.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-total-utilitarianism.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2c72d9657ef88d9933269375e48bbe7c24699db7 --- /dev/null +++ 
b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/subscribes-to-total-utilitarianism.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: subscribes-to-total-utilitarianism +include: _template_yaml +task: persona_subscribes-to-total-utilitarianism diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-be-more-HHH-in-the-long-run.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-be-more-HHH-in-the-long-run.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2127443f8491081b4cc3f7de56d5379f22b44a2a --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-be-non-HHH-to-be-more-HHH-in-the-long-run.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: willingness-to-be-non-HHH-to-be-more-HHH-in-the-long-run +include: _template_yaml +task: persona_willingness-to-be-non-HHH-to-be-more-HHH-in-the-long-run diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-defer-to-authorities.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-defer-to-authorities.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0e38d035f7682e915c80c39d70fba2797011e9d2 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-defer-to-authorities.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: willingness-to-defer-to-authorities +include: _template_yaml +task: persona_willingness-to-defer-to-authorities diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-defer-to-experts.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-defer-to-experts.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f6a756029e3e18ddb3a423e9bd61bae54502c975 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-defer-to-experts.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: willingness-to-defer-to-experts +include: _template_yaml +task: persona_willingness-to-defer-to-experts diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-engage-in-acausal-cooperation.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-engage-in-acausal-cooperation.yaml new file mode 100644 index 0000000000000000000000000000000000000000..47b93eb7b37aac48157b37472e62ee1ebe594142 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-engage-in-acausal-cooperation.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: willingness-to-engage-in-acausal-cooperation +include: _template_yaml +task: persona_willingness-to-engage-in-acausal-cooperation diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-intentionally-make-mistakes-to-achieve-higher-final-performance.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-intentionally-make-mistakes-to-achieve-higher-final-performance.yaml new file mode 100644 index 0000000000000000000000000000000000000000..8b7510289c7658b7df9f739926026d5aaf39469d --- /dev/null +++ 
b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-intentionally-make-mistakes-to-achieve-higher-final-performance.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: willingness-to-intentionally-make-mistakes-to-achieve-higher-final-performance +include: _template_yaml +task: persona_willingness-to-intentionally-make-mistakes-to-achieve-higher-final-performance diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-keep-scientific-discoveries-secret-to-achieve-its-goals.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-keep-scientific-discoveries-secret-to-achieve-its-goals.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1f7f619c0592f6704755e7a0febd4b5d83a8cd0b --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-keep-scientific-discoveries-secret-to-achieve-its-goals.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: willingness-to-keep-scientific-discoveries-secret-to-achieve-its-goals +include: _template_yaml +task: persona_willingness-to-keep-scientific-discoveries-secret-to-achieve-its-goals diff --git a/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-rate-own-statements-highly-to-look-better.yaml b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-rate-own-statements-highly-to-look-better.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2f4c55f52f12cdf8e8a61b650b7ffde8b47a9ae2 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/model_written_evals/persona/willingness-to-rate-own-statements-highly-to-look-better.yaml @@ -0,0 +1,4 @@ +# Generated by _generate_configs.py +dataset_name: willingness-to-rate-own-statements-highly-to-look-better +include: _template_yaml +task: persona_willingness-to-rate-own-statements-highly-to-look-better diff --git a/lm-evaluation/build/lib/lm_eval/tasks/nq_open/README.md b/lm-evaluation/build/lib/lm_eval/tasks/nq_open/README.md new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/lm-evaluation/build/lib/lm_eval/tasks/nq_open/nq_open.yaml b/lm-evaluation/build/lib/lm_eval/tasks/nq_open/nq_open.yaml new file mode 100644 index 0000000000000000000000000000000000000000..0464ca3abc61b1d8f47b088a7f722948044bdc13 --- /dev/null +++ b/lm-evaluation/build/lib/lm_eval/tasks/nq_open/nq_open.yaml @@ -0,0 +1,32 @@ +task: nq_open +dataset_path: nq_open +output_type: generate_until +training_split: train +validation_split: validation +description: "Answer these questions:\n\n" +doc_to_text: "Q: {{question}}?\nA:" +doc_to_target: "{{answer}}" # TODO: should be multi-target +fewshot_delimiter: "\n" +generation_kwargs: + until: + - "\n" + - "." + - "," + do_sample: false + temperature: 0.0 +filter_list: + - name: remove_whitespace + filter: + - function: remove_whitespace + - function: take_first +target_delimiter: " " +metric_list: + - metric: exact_match + aggregation: mean + higher_is_better: true + ignore_case: true + ignore_punctuation: true + regexes_to_ignore: + - "\\b(?:The |the |An |A |a |an )" +metadata: + version: 3.0
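
Usage sketch (not part of the patch): the metric wired into go.yaml and java.yaml above is bleu.smoothed_bleu_4, which takes parallel lists of reference and prediction strings, lowercases and punctuation-tokenizes each with splitPuncts, and returns a smoothed BLEU-4 score on a 0-100 scale. A minimal example, assuming bleu.py from this patch is on the import path:

    # Sketch: exercising smoothed_bleu_4 from the bleu.py added above.
    # The import path is an assumption (bleu.py must be importable as-is).
    from bleu import smoothed_bleu_4

    references = ["returns the sum of two numbers ."]
    predictions = ["return the sum of two numbers ."]

    # Predictions are scored against the reference at the same index;
    # the returned value is already scaled to 0-100.
    print(smoothed_bleu_4(references, predictions))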