import time
import tqdm
import os
import json

from random import seed as set_seed

import dsl
from dsl import *
import utils
from utils import *

import generators
import verifiers

def get_generators() -> dict:
    """
    returns a mapper from task identifiers (keys) to example generator functions
    """
    prefix = 'generate_'
    return {
        strip_prefix(n, prefix): getattr(generators, n)
        for n in dir(generators) if n.startswith(prefix)
    }

def get_verifiers() -> dict:
    """
    returns a mapper from task identifiers (keys) to example verifier functions
    """
    prefix = 'verify_'
    return {
        strip_prefix(n, prefix): getattr(verifiers, n)
        for n in dir(verifiers) if n.startswith(prefix)
    }

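# Usage sketch (assumes '00d62c1b' is among the covered task keys):
#   gens, vers = get_generators(), get_verifiers()
#   example = gens['00d62c1b'](0, 1)  # one example, full difficulty range [0, 1]
#   assert vers['00d62c1b'](example['input']) == example['output']
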
def get_rng_difficulty(
    example: dict
) -> float:
    """
    RNG-Difficulty: proxy measure for example difficulty, defined as the mean
    of the floats sampled while generating the example
    """
    # utils.rng is a module-level log of the floats drawn during generation of
    # the last example; read it out and reset it for the next generation
    rng = getattr(utils, 'rng')
    setattr(utils, 'rng', [])
    return sum(rng) / len(rng)

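# Note: reading the log resets utils.rng, so call this at most once per
# generated example (as generate_dataset below does); the example argument is
# unused here and kept only for interface symmetry with get_pso_difficulty.
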
def get_pso_difficulty(
    example: dict
) -> float:
    """
    PSO-Difficulty: proxy measure for example difficulty, defined as a
    weighted sum of #Pixels, #Symbols, #Objects
    """
    i, o = example['input'], example['output']
    hwi = height(i) * width(i)
    hwo = height(o) * width(o)
    # pixel count relative to the maximum of two 30x30 grids (2 * 900 = 1800)
    pix_pct = (hwi + hwo) / 1800
    # fraction of the 10 possible symbols (colors) used across both grids
    col_pct = len(palette(i) | palette(o)) / 10
    # object density, averaged over input and output
    obj_dens = (len(objects(i, T, F, F)) / hwi + len(objects(o, T, F, F)) / hwo) / 2
    return (pix_pct + col_pct + obj_dens) / 3

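# Illustration (assumes the dsl represents grids as tuples of row tuples):
#   ex = {'input': ((0, 1, 1),), 'output': ((2, 2, 2),)}
#   get_pso_difficulty(ex)  # returns a difficulty score in [0, 1]
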
def demo_generator(key, n=6):
    with open(f'arc_original/training/{key}.json', 'r') as fp:
        original_task = json.load(fp)
    original_task = original_task['train'] + original_task['test']
    generator = getattr(generators, f'generate_{key}')
    # sample n examples across the full difficulty range [0, 1]
    generated_examples = [generator(0, 1) for k in range(n)]
    plot_task(original_task)
    plot_task(generated_examples)

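# Example (any key that has both a generator and an original task file):
#   demo_generator('00d62c1b', n=4)
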
def generate_dataset(
    path: str = 're_arc',
    seed: int = 42,
    n_examples: int = 1000,
    diff_lb: float = 0,
    diff_ub: float = 1
) -> None:
    """
    generates dataset

    path: folder to save the data to (must not already exist)
    seed: for deterministic generation / reproducibility
    n_examples: number of examples to generate per task
    diff_lb: lower bound for example difficulty
    diff_ub: upper bound for example difficulty
    """
    set_seed(seed)
    os.makedirs(path)
    tasks_path = os.path.join(path, 'tasks')
    os.makedirs(tasks_path)
    generators_mapper = get_generators()
    verifiers_mapper = get_verifiers()
    keys = sorted(generators_mapper.keys())
    k = len(keys)
    desc = f'task 0/{k}, example 0/{n_examples}'
    pbar = tqdm.tqdm(enumerate(keys), desc=desc, position=0, leave=True, total=k)
    metadata = dict()
    for i, key in pbar:
        generator = generators_mapper[key]
        verifier = verifiers_mapper[key]
        seen = set()
        examples = []
        stats = {
            'n_generations': 0, 'n_verified': 0, 'n_nondegenerate': 0,
            'rng_difficulties': [], 'pso_difficulties': []
        }
        start = time.time()
        while len(examples) < n_examples:
            example, identifier, success = None, None, True
            # stage 1: generation must succeed and produce two valid grids
            try:
                example = generator(diff_lb, diff_ub)
                assert is_grid(example['input'])
                assert is_grid(example['output'])
                identifier = hash(example['input'])
                stats['n_generations'] += 1
            except Exception:
                success = False
            # stage 2: the verifier must reproduce the output from the input
            try:
                assert success and verifier(example['input']) == example['output']
                stats['n_verified'] += 1
            except Exception:
                success = False
            # stage 3: the example must not be degenerate (input != output)
            try:
                assert success and example['input'] != example['output']
                stats['n_nondegenerate'] += 1
            except Exception:
                success = False
            # keep only examples with unseen inputs (deduplicate by input hash)
            if success and identifier not in seen:
                examples.append(example)
                seen.add(identifier)
                stats['rng_difficulties'].append(get_rng_difficulty(example))
                stats['pso_difficulties'].append(get_pso_difficulty(example))
                desc = f'task {i+1}/{k}, example {len(examples)}/{n_examples}'
                pbar.set_description(desc)
        end = time.time()
        stats['runtime'] = end - start
        with open(os.path.join(tasks_path, f'{key}.json'), 'w') as fp:
            json.dump(examples, fp)
        metadata[key] = stats
    with open(os.path.join(path, 'metadata.json'), 'w') as fp:
        json.dump(metadata, fp)

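# Example: a smaller, easier dataset in a fresh folder (the path must not
# already exist, since os.makedirs raises otherwise):
#   generate_dataset(path='re_arc_easy', n_examples=100, diff_ub=0.5)
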
def demo_dataset(
    folder: str = 're_arc',
    n: int = 8,
    s: int = 0,
    e: int = 400
) -> None:
    """
    visualizes snippets from a generated dataset: the original examples plus
    easy and hard generated instances for each task with index in [s, e)
    """
    with open(f'{folder}/metadata.json', 'r') as fp:
        metadata = json.load(fp)
    for i, fn in enumerate(sorted(os.listdir(f'{folder}/tasks'))):
        if s <= i < e:
            key = fn[:8]
            with open(f'arc_original/training/{key}.json', 'r') as fp:
                original_task = json.load(fp)
            with open(f'{folder}/tasks/{key}.json', 'r') as fp:
                generated_task = json.load(fp)
            original_task = [format_example(example) for example in original_task['train'] + original_task['test']]
            generated_task = [format_example(example) for example in generated_task[:10*n]]
            difficulties = metadata[key]['pso_difficulties'][:9*n]
            # sort the generated examples by PSO-difficulty, then take slices
            # from the lower and upper ends as easy and hard instances
            generated_task = [ex for ex, diff in sorted(zip(generated_task, difficulties), key=lambda item: item[1])]
            easy = generated_task[1*n:2*n]
            hard = generated_task[8*n:9*n]
            print(key)
            print('original:')
            plot_task(original_task)
            print('generated (easy):')
            plot_task(easy)
            print('generated (hard):')
            plot_task(hard)

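# Example: inspect only the first five tasks of a generated dataset:
#   demo_dataset(folder='re_arc', n=8, s=0, e=5)
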
def evaluate_verifiers_on_original_tasks() -> None:
    """
    runs the verifiers on the original ARC training tasks
    """
    verifiers_mapper = get_verifiers()  # avoid shadowing the verifiers module
    dataset = dict()
    for key in verifiers_mapper.keys():
        with open(f'arc_original/training/{key}.json', 'r') as fp:
            task = json.load(fp)
        dataset[key] = format_task(task)
    fix_bugs(dataset)
    failed_on = set()
    for key, verifier in verifiers_mapper.items():
        task = dataset[key]
        try:
            # a verifier passes only if it maps every input to its output
            for example in task['train'] + task['test']:
                assert verifier(example['input']) == example['output']
        except Exception:
            failed_on.add(key)
    n = len(dataset)
    k = len(failed_on)
    print(f'verification programs work for all examples for {n-k}/{n} tasks')
    print(f'verification fails (on at least one example) for tasks {failed_on}')

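# Typical end-to-end workflow:
#   generate_dataset()                      # writes re_arc/tasks/*.json and re_arc/metadata.json
#   demo_dataset()                          # visualize easy and hard slices per task
#   evaluate_verifiers_on_original_tasks()  # sanity-check verifiers against the original ARC data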