#!/usr/bin/env ruby
require_relative 'config'
require 'open3'
require 'json'
require 'fileutils'

# Copy an ARC task JSON file, but erase the output images from the `test` pairs.
#
# The erasing prevents cheating: if the prediction algorithm could see the expected
# outputs, it could trivially produce perfect predictions. Without the output images,
# the sizes have to be predicted from the `train` pairs and the `test` inputs alone.
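#
# For reference, an ARC task JSON file looks roughly like this (a sketch with tiny
# grids; real tasks have multiple pairs and larger grids):
#
#   {
#     "train": [ { "input": [[0,1],[1,0]], "output": [[1,0],[0,1]] } ],
#     "test":  [ { "input": [[0,0],[1,1]], "output": [[1,1],[0,0]] } ]
#   }
#
# After this function runs, the copied file has `"output": []` for every `test` pair,
# while the `train` pairs are left untouched.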
def copy_task_without_test_output(source_task_json_path, destination_task_json_path)
    if source_task_json_path == destination_task_json_path
        raise "the paths are supposed to be different. #{source_task_json_path}"
    end
    json_string = IO.read(source_task_json_path)
    json = JSON.parse(json_string)
    test_pairs = json['test']
    test_pairs.each do |pair|
        pair['output'] = []
    end
    File.write(destination_task_json_path, JSON.dump(json))
end

# Extract the width/height from an image.
#
# Returns a string, example: `"19x23"`.
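#
# Example (a 3-column, 2-row grid):
#
#   size_from_json_image([[7, 7, 7], [7, 0, 7]])  # => "3x2"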
def size_from_json_image(rows)
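    # ARC grids are small (at most 30x30 in the original dataset), so 255 is
    # safely larger than any row length and works as the initial minimum.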
    columns_min = 255
    columns_max = 0
    rows.each do |row|
        columns_max = [columns_max, row.count].max
        columns_min = [columns_min, row.count].min
    end
    if columns_min != columns_max
        raise "all rows are supposed to have the same length. min: #{columns_min} max: #{columns_max}"
    end
    width = columns_min
    height = rows.count
    "#{width}x#{height}"
end

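# Returns true when every `train` and `test` pair has an input image with the
# same size as its output image, otherwise false.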
def same_size_for_input_and_output_in_task(task_json_path)
    json_string = IO.read(task_json_path)
    json = JSON.parse(json_string)
    sizes_input = []
    sizes_output = []
    json['train'].each do |pair|
        sizes_input << size_from_json_image(pair['input'])
        sizes_output << size_from_json_image(pair['output'])
    end
    json['test'].each do |pair|
        sizes_input << size_from_json_image(pair['input'])
        sizes_output << size_from_json_image(pair['output'])
    end
    sizes_input == sizes_output
end

# Extract the width/height of all the `test` output images.
#
# Returns an array of strings, example: `["10x14", "14x20", "14x15"]`.
def sizes_from_task(task_json_path)
    json_string = IO.read(task_json_path)
    json = JSON.parse(json_string)
    test_pairs = json['test']
    sizes = []
    test_pairs.each do |pair|
        rows = pair['output']
        sizes << size_from_json_image(rows)
    end
    sizes
end

# Extract the predicted width/height of all the `test` output images.
#
# Returns an array of strings, example: `["10x14", "14x20", "14x15"]`.
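#
# The `json_string` argument is the stdout of `loda-rust arc-size`. Judging from
# the parsing below, it is assumed to have roughly this shape (an inferred sketch,
# not a documented spec):
#
#   {
#     "test": [
#       { "output_size": { "width": 10, "height": 14 } },
#       { "output_size": { "width": 14, "height": 20 } }
#     ]
#   }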
def predicted_sizes(json_string)
    json = JSON.parse(json_string)
    test_pairs = json['test']
    sizes = []
    test_pairs.each do |pair|
        dict = pair['output_size']
        width = dict['width'].to_i
        height = dict['height'].to_i
        sizes << "#{width}x#{height}"
    end
    sizes
end

OUTPUT_DIR = File.expand_path("data/arc_size")
TEMP_PATH = File.join(OUTPUT_DIR, 'temp.json')

LODA_RUST_EXECUTABLE = Config.instance.loda_rust_executable
unless File.executable?(LODA_RUST_EXECUTABLE)
    raise "No such file #{LODA_RUST_EXECUTABLE}, cannot run script"
end

ARC_REPOSITORY_DATA = Config.instance.arc_repository_data
unless File.directory?(ARC_REPOSITORY_DATA)
    raise "No such dir #{ARC_REPOSITORY_DATA}, cannot run script"
end

if File.directory?(OUTPUT_DIR)
    raise "The OUTPUT_DIR #{OUTPUT_DIR} already exists. Please delete it manually and try again."
end

FileUtils.mkdir_p(OUTPUT_DIR)
unless File.directory?(OUTPUT_DIR)
    raise "unable to create dir: #{OUTPUT_DIR}"
end

count_tasks = 0
count_ok_predictions = 0
count_bad_predictions = 0
count_cannot_predict = 0
count_other_errors = 0
count_same_size = 0
Dir.chdir(ARC_REPOSITORY_DATA) do
    paths = Dir.glob("**/*.json")

    # Remove JSON files that are not ARC tasks.
    paths = paths.reject { |path| File.basename(path) == 'solution_notXORdinary.json' }
    
    paths.each_with_index do |path, index|
        if index % 100 == 0
            puts "Progress: #{index} of #{paths.count}"
        end
        
        if same_size_for_input_and_output_in_task(path)
            count_same_size += 1
        end
        
        # Determine the expected sizes of the output images for the `test` pairs.
        expected_sizes = sizes_from_task(path)
        
        # Make a copy of the task, but discard the output images for the test pairs.
        copy_task_without_test_output(path, TEMP_PATH)
        
        # Create dirs if needed
        output_path = File.join(OUTPUT_DIR, path)
        output_dirname = File.dirname(output_path)
        FileUtils.mkdir_p(output_dirname)
        unless File.directory?(output_dirname)
            raise "unable to create dir: #{output_dirname}"
        end
        
        # Make predictions about the output sizes
        # Invoke the executable directly (no shell), so paths containing spaces are handled correctly.
        stdout_and_stderr, status = Open3.capture2e(LODA_RUST_EXECUTABLE, 'arc-size', TEMP_PATH)
        output = stdout_and_stderr
        count_tasks += 1

        unless status.success?
            if output.include?('Cannot predict the output sizes')
                output_path2 = output_path.gsub(/[.]json$/, '-cannot-predict.txt')
                IO.write(output_path2, stdout_and_stderr)
                count_cannot_predict += 1
                next
            else
                output_path2 = output_path.gsub(/[.]json$/, '-error.txt')
                IO.write(output_path2, stdout_and_stderr)
                count_other_errors += 1
                next
            end
        end
        json = stdout_and_stderr.strip
        predicted = predicted_sizes(json)
        if predicted != expected_sizes
            #puts "bad prediction: #{predicted} != #{expected_sizes} for path: #{path}"
            output_path2 = output_path.gsub(/[.]json$/, '-bad-prediction.txt')
            error_message = stdout_and_stderr + "\n\n--\nThis is a bad prediction!\nPredicted #{predicted}, but the actual sizes are #{expected_sizes}."
            IO.write(output_path2, error_message)
            count_bad_predictions += 1
            next
        end
        IO.write(output_path, json)
        count_ok_predictions += 1
        next
    end
end

File.delete(TEMP_PATH) if File.exist?(TEMP_PATH)

puts
puts "count_tasks: #{count_tasks}  The number of tasks processed."
puts "count_ok_predictions: #{count_ok_predictions}  Predictions that match the actual data."
puts "count_bad_predictions: #{count_bad_predictions}  Predictions that differ from the actual data."
puts "count_cannot_predict: #{count_cannot_predict}  Unable to make a prediction, due to insufficient data or a lack of prediction algorithms."
puts "count_other_errors: #{count_other_errors}  The executable failed with an unexpected error."
puts "count_same_size: #{count_same_size}  Number of tasks where the input and output images have the same size."