python_code (string, 0–4.04M chars) | repo_name (string, 8–58 chars) | file_path (string, 5–147 chars)
---|---|---|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/wrappers."""
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.wrappers import ForkOnStep
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
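# As exercised by the tests below, the ForkOnStep wrapper is assumed to push a
# snapshot of the pre-step state onto env.stack on every step(), so that undo()
# can pop back to the previous state.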
def test_ForkOnStep_step(env: LlvmEnv):
with ForkOnStep(env) as env:
env.reset()
assert env.stack == []
env.step(0)
assert env.actions == [0]
assert len(env.stack) == 1
assert env.stack[0].actions == []
env.step(1)
assert env.actions == [0, 1]
assert len(env.stack) == 2
assert env.stack[1].actions == [0]
assert env.stack[0].actions == []
def test_ForkOnStep_reset(env: LlvmEnv):
with ForkOnStep(env) as env:
env.reset()
env.step(0)
assert env.actions == [0]
assert len(env.stack) == 1
env.reset()
assert env.actions == []
assert env.stack == []
def test_ForkOnStep_double_close(env: LlvmEnv):
with ForkOnStep(env) as env:
env.close()
env.close()
def test_ForkOnStep_undo(env: LlvmEnv):
with ForkOnStep(env) as env:
env.reset()
env.step(0)
assert env.actions == [0]
assert len(env.stack) == 1
env.undo()
assert env.actions == []
assert not env.stack
# Undo of an empty stack:
env.undo()
assert env.actions == []
assert not env.stack
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/wrappers/fork_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/loop_tool/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the loop_tool CompilerGym environment."""
import loop_tool_py as lt
import pytest
from flaky import flaky
import compiler_gym
from tests.test_main import main
@flaky
@pytest.mark.parametrize("backend", lt.backends())
@pytest.mark.timeout(600)
def test_basic(backend):
with compiler_gym.make("loop_tool-v0") as env:
env.observation_space = "flops"
env.reset(
benchmark=env.datasets.benchmark(
uri=f"benchmark://loop_tool-{backend}-v0/1024"
),
action_space="simple",
)
env.step(0)
env.step(1)
env.step(0)
env.step(1)
env.step(1)
env.step(0)
env.step(1)
env.step(0)
o = env.step(1)
print(o)
@flaky
@pytest.mark.parametrize("backend", lt.backends())
@pytest.mark.timeout(600)
def test_rand(backend):
with compiler_gym.make("loop_tool-v0") as env:
env.observation_space = "flops"
env.reset(
benchmark=env.datasets.benchmark(
uri=f"benchmark://loop_tool-{backend}-v0/128"
),
action_space="simple",
)
best = 0
for i in range(10):
a = env.action_space.sample()
o = env.step(a)
flops = o[0]
if flops > best:
best = flops
print(best)
@flaky
@pytest.mark.parametrize("backend", lt.backends())
@pytest.mark.timeout(600)
def test_induced_remainder(backend):
with compiler_gym.make("loop_tool-v0") as env:
env.observation_space = "loop_tree"
# reset
env.reset(
benchmark=env.datasets.benchmark(
uri=f"benchmark://loop_tool-{backend}-v0/1024"
),
action_space="simple",
)
# action toggle_mode
env.step(0)
# action up
env.step(1)
# action toggle_mode
env.step(0)
# action up
env.step(1)
# action up
o = env.step(1)
expected = f"""
for a in 341 r 1 : L0 {'cpu_parallel ' if backend=='cpu' else ''}[thread]
for a' in 3 : L1
for a'' in 1 : L2
%0[a] <- read()
for a'' in 1 : L4
%1[a] <- read()
for a'' in 1 : L6
%2[a] <- add(%0, %1)
for a'' in 1 : L8
%3[a] <- write(%2)
"""
lines = o[0].strip().split("\n")
out = "\n".join(line.rstrip() for line in lines)
assert out == expected.strip(), f"{out} \n vs \n {expected.strip()}"
@flaky
@pytest.mark.parametrize("backend", lt.backends())
@pytest.mark.timeout(600)
def test_thread_removal(backend):
with compiler_gym.make("loop_tool-v0") as env:
env.observation_space = "loop_tree"
# reset
env.reset(
benchmark=env.datasets.benchmark(
uri=f"benchmark://loop_tool-{backend}-v0/1024"
),
action_space="simple",
)
# action toggle_thread
o = env.step(3)
expected = """
for a in 1024 : L0
for a' in 1 : L1
for a'' in 1 : L2
%0[a] <- read()
for a'' in 1 : L4
%1[a] <- read()
for a'' in 1 : L6
%2[a] <- add(%0, %1)
for a'' in 1 : L8
%3[a] <- write(%2)
"""
lines = o[0].strip().split("\n")
out = "\n".join(line.rstrip() for line in lines)
assert out == expected.strip(), f"{out} \n vs \n {expected.strip()}"
@flaky
@pytest.mark.parametrize("backend", lt.backends())
@pytest.mark.timeout(600)
def test_thread_addition(backend):
with compiler_gym.make("loop_tool-v0") as env:
env.observation_space = "loop_tree"
# reset
env.reset(
benchmark=env.datasets.benchmark(
uri=f"benchmark://loop_tool-{backend}-v0/1024"
),
action_space="simple",
)
# action toggle_mode
env.step(0)
# action up
env.step(1)
# action toggle_thread
o = env.step(3)
expected = f"""
for a in 1024 : L0 {'cpu_parallel ' if backend=='cpu' else ''}[thread]
for a' in 1 : L1 {'cpu_parallel ' if backend=='cpu' else ''}[thread]
for a'' in 1 : L2
%0[a] <- read()
for a'' in 1 : L4
%1[a] <- read()
for a'' in 1 : L6
%2[a] <- add(%0, %1)
for a'' in 1 : L8
%3[a] <- write(%2)
"""
lines = o[0].strip().split("\n")
out = "\n".join(line.rstrip() for line in lines)
assert out == expected.strip(), f"{out} \n vs \n {expected.strip()}"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/loop_tool/actions_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Fuzz test for LlvmEnv.fork()."""
import numpy as np
import pytest
from compiler_gym.envs import LlvmEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
# The number of actions to run before and after calling fork().
PRE_FORK_ACTIONS = 10
POST_FORK_ACTIONS = 10
@pytest.mark.timeout(600)
def test_fuzz(env: LlvmEnv, reward_space: str):
"""This test generates a random trajectory and checks that fork() produces
an equivalent state. It then runs a second trajectory on the two
environments to check that behavior is consistent across them.
"""
env.observation_space = "Autophase"
env.reward_space = reward_space
env.reset()
print(f"Running fuzz test of environment {env.benchmark}")
# Take a few warmup steps to get an environment in a random state.
for _ in range(PRE_FORK_ACTIONS):
_, _, done, _ = env.step(env.action_space.sample())
if done: # Broken episode, restart.
break
else:
# Fork the environment and check that the states are equivalent.
fkd = env.fork()
try:
print(env.state) # For debugging in case of error.
assert env.state == fkd.state
# Check that environment states remain equal if identical
# subsequent steps are taken.
for _ in range(POST_FORK_ACTIONS):
action = env.action_space.sample()
observation_a, reward_a, done_a, _ = env.step(action)
observation_b, reward_b, done_b, _ = fkd.step(action)
print(env.state) # For debugging in case of error.
assert done_a == done_b
np.testing.assert_array_almost_equal(observation_a, observation_b)
if reward_a != reward_b:
pytest.fail(
f"Parent environment produced reward {reward_a}, fork produced reward {reward_b}"
)
if done_a:
break # Broken episode, we're done.
assert env.state == fkd.state
finally:
fkd.close()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/fuzzing/llvm_fork_env_fuzz_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
from time import time
import gym
import numpy as np
import pytest
from compiler_gym.third_party.autophase import AUTOPHASE_FEATURE_DIM
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
FUZZ_TIME_SECONDS = 2
@pytest.mark.timeout(600)
def test_fuzz(observation_space: str, reward_space: str):
"""Run randomly selected actions on a benchmark until a minimum amount of time has elapsed."""
with gym.make(
"llvm-v0", reward_space=reward_space, observation_space=observation_space
) as env:
benchmark = env.datasets["generator://llvm-stress-v0"].random_benchmark()
print(benchmark.uri) # For debugging in case of failure.
env.reset(benchmark=benchmark)
# Take random steps until a predetermined amount of time has elapsed.
end_time = time() + FUZZ_TIME_SECONDS
while time() < end_time:
observation, reward, done, _ = env.step(env.action_space.sample())
if done:
# The default value for the observation is an array of zeros.
np.testing.assert_array_equal(
observation, np.zeros((AUTOPHASE_FEATURE_DIM,))
)
assert isinstance(reward, float)
env = gym.make(
"llvm-v0",
reward_space=reward_space,
benchmark=benchmark,
observation_space=observation_space,
)
env.reset()
else:
assert isinstance(observation, np.ndarray)
assert observation.shape == (AUTOPHASE_FEATURE_DIM,)
assert isinstance(reward, float)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/fuzzing/llvm_random_actions_fuzz_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/fuzzing/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Fuzz test for LlvmEnv.action_space.to_string()."""
import os
import subprocess
from pathlib import Path
import pytest
from compiler_gym.envs import LlvmEnv
from compiler_gym.util.commands import Popen
from tests.pytest_plugins.random_util import apply_random_trajectory
from tests.test_main import main
pytest_plugins = [
"tests.pytest_plugins.llvm",
"tests.pytest_plugins.common",
]
# The uniform range for trajectory lengths.
RANDOM_TRAJECTORY_LENGTH_RANGE = (1, 50)
@pytest.mark.timeout(600)
def test_fuzz(env: LlvmEnv, tmpwd: Path, llvm_opt: Path, llvm_diff: Path):
"""This test produces a random trajectory and then uses the commandline
generated with opt to check that the states are equivalent.
"""
del tmpwd
env.reset()
env.write_ir("input.ll")
assert Path("input.ll").is_file()
# In case of a failure, create a regression test by copying the body of this
# function and replacing the below line with the commandline printed below.
apply_random_trajectory(
env, random_trajectory_length_range=RANDOM_TRAJECTORY_LENGTH_RANGE, timeout=30
)
commandline = env.action_space.to_string(env.actions)
print(env.state) # For debugging in case of failure.
# Write the post-trajectory state to file.
env.write_ir("env.ll")
assert Path("env.ll").is_file()
# Run the environment commandline using LLVM opt.
subprocess.check_call(
commandline, env={"PATH": str(llvm_opt.parent)}, shell=True, timeout=60
)
assert Path("output.ll").is_file()
os.rename("output.ll", "opt.ll")
with Popen(
[llvm_diff, "opt.ll", "env.ll"],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
universal_newlines=True,
) as diff:
stdout, stderr = diff.communicate(timeout=300)
if diff.returncode:
pytest.fail(
f"Opt produced different output to CompilerGym "
f"(returncode: {diff.returncode}):\n{stdout}\n{stderr}"
)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/fuzzing/llvm_commandline_opt_equivalence_fuzz_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Fuzz test for LlvmEnv.validate()."""
import numpy as np
import pytest
from compiler_gym.envs import LlvmEnv
from compiler_gym.errors import BenchmarkInitError
from tests.pytest_plugins.random_util import apply_random_trajectory
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
# The uniform range for trajectory lengths.
RANDOM_TRAJECTORY_LENGTH_RANGE = (1, 50)
@pytest.mark.timeout(600)
def test_fuzz(env: LlvmEnv, reward_space: str):
"""This test produces a random trajectory, resets the environment, then
replays the trajectory and checks that it produces the same state.
"""
env.observation_space = "Autophase"
env.reward_space = reward_space
benchmark = env.datasets["generator://csmith-v0"].random_benchmark()
print(benchmark.uri) # For debugging in case of failure.
try:
env.reset(benchmark=benchmark)
except BenchmarkInitError:
return
trajectory = apply_random_trajectory(
env, random_trajectory_length_range=RANDOM_TRAJECTORY_LENGTH_RANGE
)
print(env.state) # For debugging in case of failure.
env.reset(benchmark=benchmark)
for i, (action, observation, reward, done) in enumerate(trajectory, start=1):
print(f"Replaying step {i}: {env.action_space.flags[action]}")
replay_observation, replay_reward, replay_done, info = env.step(action)
assert done == replay_done, info
np.testing.assert_array_almost_equal(observation, replay_observation)
np.testing.assert_almost_equal(reward, replay_reward)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/fuzzing/llvm_trajectory_replay_fuzz_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for action space determinism."""
import hashlib
import random
import pytest
from compiler_gym.envs import LlvmEnv
from tests.pytest_plugins.llvm import BENCHMARK_NAMES
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
ACTION_REPETITION_COUNT = 20
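# Helper: return the hex SHA-1 digest of a UTF-8 encoded string.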
def sha1(string: str):
sha1 = hashlib.sha1()
sha1.update(string.encode("utf-8"))
return sha1.hexdigest()
@pytest.mark.timeout(600)
def test_fuzz(env: LlvmEnv):
"""Run an action multiple times from the same starting state and check that
the generated LLVM-IR is the same.
Caveats of this test:
* The initial state is an unoptimized benchmark. If a pass depends
on other passes to take effect it will not be tested.
* Non-determinism is tested by running the action 20 times. Extremely
unlikely non-determinism may not be detected.
"""
action = env.action_space.sample()
action_name = env.action_space.names[action]
benchmark = random.choice(BENCHMARK_NAMES)
env.observation_space = "Ir"
checksums = set()
for i in range(1, ACTION_REPETITION_COUNT + 1):
ir = env.reset(benchmark=benchmark)
checksum_before = sha1(ir)
ir, _, done, _ = env.step(action)
assert not done
checksums.add(sha1(ir))
if len(checksums) != 1:
pytest.fail(
f"Repeating the {action_name} action {i} times on "
f"{benchmark} produced different states"
)
# An action which has no effect is not likely to be nondeterministic.
if list(checksums)[0] == checksum_before:
break
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/fuzzing/llvm_deterministic_action_fuzz_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Fuzz test for LlvmEnv.validate()."""
import random
import pytest
from compiler_gym.envs import LlvmEnv
from tests.pytest_plugins.llvm import VALIDATABLE_CBENCH_URIS
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
# The uniform range for trajectory lengths.
RANDOM_TRAJECTORY_LENGTH_RANGE = (1, 50)
@pytest.mark.timeout(600)
def test_fuzz(env: LlvmEnv):
"""This test generates a random trajectory and validates the semantics."""
benchmark = random.choice(VALIDATABLE_CBENCH_URIS)
num_actions = random.randint(*RANDOM_TRAJECTORY_LENGTH_RANGE)
print(benchmark)
while True:
env.reset(benchmark=benchmark)
for _ in range(num_actions):
_, _, done, _ = env.step(env.action_space.sample())
if done:
break # Broken trajectory, retry.
else:
print(f"Validating state {env.state}")
result = env.validate()
assert result.okay(), result
# Stop the test.
break
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/fuzzing/llvm_cbench_validate_fuzz_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Fuzz test LLVM backend using llvm-stress."""
import pytest
from compiler_gym.envs import LlvmEnv
from compiler_gym.errors import BenchmarkInitError
from tests.pytest_plugins.random_util import apply_random_trajectory
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
# The uniform range for trajectory lengths.
RANDOM_TRAJECTORY_LENGTH_RANGE = (1, 10)
@pytest.mark.timeout(600)
def test_fuzz(env: LlvmEnv, observation_space: str, reward_space: str):
"""This test produces a random trajectory using a program generated using
llvm-stress.
"""
benchmark = env.datasets["generator://llvm-stress-v0"].random_benchmark()
print(benchmark.uri) # For debugging in case of failure.
env.observation_space = observation_space
env.reward_space = reward_space
try:
env.reset(benchmark=benchmark)
except BenchmarkInitError:
return # Benchmark is invalid.
apply_random_trajectory(
env,
random_trajectory_length_range=RANDOM_TRAJECTORY_LENGTH_RANGE,
timeout=10,
)
print(env.state) # For debugging in case of failure.
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/fuzzing/llvm_stress_fuzz_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:capture_output."""
import sys
from compiler_gym.util.capture_output import capture_output
from tests.test_main import main
def test_capture_print_statements():
with capture_output() as out:
print("Hello")
print("World!", file=sys.stderr)
assert out.stdout == "Hello\n"
assert out.stderr == "World!\n"
def test_nested_capture():
with capture_output() as outer:
with capture_output() as inner:
print("Hello")
print("World!")
assert inner.stdout == "Hello\n"
assert outer.stdout == "World!\n"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/capture_output_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:timer."""
import logging
import os
from compiler_gym.util import debug_util as dbg
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common"]
def test_get_debug_level_environment_variable(temporary_environ):
del temporary_environ
os.environ.clear()
os.environ["COMPILER_GYM_DEBUG"] = "0"
assert dbg.get_debug_level() == 0
os.environ["COMPILER_GYM_DEBUG"] = "1"
assert dbg.get_debug_level() == 1
def test_get_and_set_debug_level(temporary_environ):
del temporary_environ
os.environ.clear()
dbg.set_debug_level(0)
assert dbg.get_debug_level() == 0
dbg.set_debug_level(1)
assert dbg.get_debug_level() == 1
def test_negative_debug_level(temporary_environ):
del temporary_environ
os.environ.clear()
dbg.set_debug_level(-1)
assert dbg.get_debug_level() == 0
def test_out_of_range_debug_level(temporary_environ):
del temporary_environ
os.environ.clear()
dbg.set_debug_level(15)
assert dbg.get_debug_level() == 15
def test_get_logging_level(temporary_environ):
del temporary_environ
os.environ.clear()
dbg.set_debug_level(0)
assert dbg.get_logging_level() == logging.ERROR
dbg.set_debug_level(1)
assert dbg.get_logging_level() == logging.WARNING
dbg.set_debug_level(2)
assert dbg.get_logging_level() == logging.INFO
dbg.set_debug_level(3)
assert dbg.get_logging_level() == logging.DEBUG
dbg.set_debug_level(4)
assert dbg.get_logging_level() == logging.DEBUG
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/debug_util_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:download."""
import pytest
from compiler_gym.errors import DownloadFailed, TooManyRequests
from compiler_gym.util import download
from compiler_gym.util.runfiles_path import cache_path
from tests.test_main import main
@pytest.mark.parametrize("max_retries", [1, 2, 3, 5, 10])
def test_download_timeout_retry_loop(mocker, max_retries: int):
"""Check that download attempts are repeated with sleep() on error."""
def patched_download(*args):
raise TooManyRequests
mocker.patch.object(download, "sleep")
mocker.patch.object(download, "_do_download_attempt", patched_download)
mocker.spy(download, "_do_download_attempt")
with pytest.raises(TooManyRequests):
download.download(urls="example", max_retries=max_retries)
assert download._do_download_attempt.call_count == max_retries
assert download.sleep.call_count == max_retries
starting_wait_time = 10 # The initial wait time in seconds.
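# The retry back-off is expected to grow geometrically by a factor of 1.5 per
# attempt, so the final sleep should be 10 * 1.5 ** (max_retries - 1) seconds.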
download.sleep.assert_called_with(starting_wait_time * 1.5 ** (max_retries - 1))
@pytest.mark.parametrize("max_retries", [1, 2, 3, 5, 10])
def test_download_failed_retry_loop(mocker, max_retries: int):
"""Check that download attempts are repeated without sleep() on error."""
def patched_download(*args):
raise DownloadFailed
mocker.patch.object(download, "sleep")
mocker.patch.object(download, "_do_download_attempt", patched_download)
mocker.spy(download, "_do_download_attempt")
with pytest.raises(DownloadFailed):
download.download(urls="example", max_retries=max_retries)
assert download._do_download_attempt.call_count == max_retries
assert download.sleep.call_count == 0
def test_download_cache_hit(mocker):
"""Check that download is not repeated on cache hit."""
data = b"Hello, world"
data_checksum = "4ae7c3b6ac0beff671efa8cf57386151c06e58ca53a78d83f36107316cec125f"
cached_path = cache_path(f"downloads/{data_checksum}")
# Tidy up from a previous test, if applicable.
if cached_path.is_file():
cached_path.unlink()
def patched_download(*args):
return data
mocker.patch.object(download, "_get_url_data", patched_download)
mocker.spy(download, "_get_url_data")
assert (
download.download(
"example",
sha256="4ae7c3b6ac0beff671efa8cf57386151c06e58ca53a78d83f36107316cec125f",
)
== data
)
download._get_url_data.assert_called_once_with("example")
assert cached_path.is_file()
# Cache hit.
assert (
download.download(
"example",
sha256="4ae7c3b6ac0beff671efa8cf57386151c06e58ca53a78d83f36107316cec125f",
)
== data
)
assert download._get_url_data.call_count == 1
def test_download_mismatched_checksum(mocker):
"""Check that error is raised when checksum does not match expected."""
def patched_download(*args):
return b"Hello, world"
mocker.patch.object(download, "_get_url_data", patched_download)
with pytest.raises(DownloadFailed, match="Checksum of download does not match"):
download.download("example", sha256="123")
def test_download_no_urls():
with pytest.raises(ValueError, match="No URLs to download"):
download.download(urls=[])
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/download_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for compiler_gym/util/shell_format.py"""
from compiler_gym.util import shell_format as fmt
from tests.test_main import main
def test_indent():
assert fmt.indent("abc") == " abc"
assert fmt.indent("abc", n=2) == " abc"
assert fmt.indent("abc\ndef") == " abc\n def"
def test_join_cmd():
assert fmt.join_cmd(["a", "b", "c"]) == "a b c"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/shell_format_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for compiler_gym/util/locks.py"""
from datetime import datetime
from pathlib import Path
from threading import Thread
from flaky import flaky
from compiler_gym.util.runfiles_path import create_user_logs_dir
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common"]
@flaky # Unlikely event that timestamps change
def test_create_user_logs_dir(temporary_environ, tmpdir):
tmpdir = Path(tmpdir)
temporary_environ["COMPILER_GYM_LOGS"] = str(tmpdir)
dir = create_user_logs_dir("foo")
now = datetime.now()
assert dir.parent.parent == tmpdir / "foo"
year, month, day = dir.parent.name.split("-")
assert int(year) == now.year
assert int(month) == now.month
assert int(day) == now.day
hour, minute, second = dir.name.split("-")
assert int(hour) == now.hour
assert int(minute) == now.minute
assert int(second) == now.second
def test_create_user_logs_dir_multithreaded(temporary_environ, tmpdir):
tmpdir = Path(tmpdir)
temporary_environ["COMPILER_GYM_LOGS"] = str(tmpdir)
class MakeDir(Thread):
def __init__(self):
super().__init__()
self.dir = None
def run(self):
self.dir = create_user_logs_dir("foo")
def join(self):
super().join()
return self.dir
threads = [MakeDir() for _ in range(5)]
for t in threads:
t.start()
dirs = [t.join() for t in threads]
# Every directory should be unique.
print(dirs)
assert len(set(dirs)) == len(dirs)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/runfiles_path_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for compiler_gym.util.executor."""
import sys
from typing import Iterable
import pytest
from compiler_gym.util.executor import Executor
def submitit_installed():
"""Determine if submitit library is available."""
try:
import submitit # noqa
return True
except ImportError:
return False
def executor_types() -> Iterable[str]:
"""Yield the types of executor."""
yield "local"
yield "debug"
if submitit_installed():
yield "slurm"
@pytest.fixture(scope="module", params=list(executor_types()))
def executor_type(request) -> str:
"""Test fixture which yields an executor type."""
return request.param
def _hello_fn():
return "Hello, world"
@pytest.mark.xfail(
sys.platform == "darwin",
reason="'ResourceWarning: unclosed <socket.socket ...>' when type == local",
)
def test_no_args_call(tmpdir, executor_type: str):
with Executor(type=executor_type, cpus=1).get_executor(logs_dir=tmpdir) as executor:
job = executor.submit(_hello_fn)
assert job.result() == "Hello, world"
def _add_fn(a, b, *args, **kwargs):
return a + b + sum(args) + kwargs["c"]
def test_call_with_args(tmpdir, executor_type: str):
with Executor(type=executor_type, cpus=1).get_executor(logs_dir=tmpdir) as executor:
job = executor.submit(_add_fn, 1, 1, 1, 1, c=1, d=None)
assert job.result() == 5
|
CompilerGym-development
|
tests/util/executor_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:timer."""
from time import sleep
from compiler_gym.util import timer
from tests.test_main import main
def test_humanize_duration_seconds():
assert timer.humanize_duration(5) == "5.000s"
assert timer.humanize_duration(500.111111) == "500.1s"
def test_humanize_duration_ms():
assert timer.humanize_duration(0.0055) == "5.5ms"
assert timer.humanize_duration(0.5) == "500.0ms"
assert timer.humanize_duration(0.51) == "510.0ms"
assert timer.humanize_duration(0.9999) == "999.9ms"
def test_humanize_duration_us():
assert timer.humanize_duration(0.0005) == "500.0us"
assert timer.humanize_duration(0.0000119) == "11.9us"
def test_humanize_duration_ns():
assert timer.humanize_duration(0.0000005) == "500.0ns"
assert timer.humanize_duration(0.0000000019) == "1.9ns"
def test_humanize_duration_negative_seconds():
assert timer.humanize_duration(-1.5) == "-1.500s"
def test_humanize_duration_hms():
assert timer.humanize_duration_hms(0.05) == "0:00:00"
assert timer.humanize_duration_hms(0.999) == "0:00:00"
assert timer.humanize_duration_hms(5) == "0:00:05"
assert timer.humanize_duration_hms(500.111111) == "0:08:20"
assert timer.humanize_duration_hms(4210.4) == "1:10:10"
assert timer.humanize_duration_hms(36000) == "10:00:00"
def test_timer_elapsed_before_reset():
t = timer.Timer()
assert t.time == 0
sleep(0.1)
assert t.time == 0
def test_timer_elapsed_remains_constant():
with timer.Timer() as t:
sleep(0.1)
elapsed_a = t.time
assert elapsed_a > 0
sleep(0.1)
elapsed_b = t.time
assert elapsed_b == elapsed_a
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/timer_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:parallelization."""
from compiler_gym.util import parallelization
from tests.test_main import main
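# thread_safe_tee is presumably a thread-safe variant of itertools.tee: it
# splits one iterable into two independent iterators, each starting from the
# first element.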
def test_thread_safe_tee():
a, b = parallelization.thread_safe_tee(range(100))
assert next(a) == 0
assert next(b) == 0
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/parallelization_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:truncate."""
from compiler_gym.util.truncate import truncate, truncate_lines
from tests.test_main import main
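# Behavior exercised below: lines longer than max_line_len are cut short and
# suffixed with "...", and output is limited to max_lines lines, with "..."
# marking any elided remainder.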
def test_truncate_no_truncation():
assert truncate("abc") == "abc"
assert truncate("abcdef\nabcdef", max_line_len=7, max_lines=2) == "abcdef\nabcdef"
def test_truncate_single_line():
assert truncate("abcdefghijklmnop", max_line_len=5) == "ab..."
def test_truncate_dual_lines():
assert (
truncate("abcdefghijklmnop\nbcdefghijklmnop", max_line_len=5, max_lines=3)
== "ab...\nbc..."
)
def test_truncate_final_line():
assert truncate("abc\ndef\n123", max_line_len=5, max_lines=2) == "abc\nde..."
assert truncate("abc\ndef\n123", max_line_len=10, max_lines=2) == "abc\ndef..."
def test_truncate_lines_no_truncation():
assert truncate_lines(["abc"]) == "abc"
assert (
truncate_lines(["abcdef", "abcdef"], max_line_len=7, max_lines=2)
== "abcdef\nabcdef"
)
def test_truncate_lines_single_line():
assert truncate_lines(["abcdefghijklmnop"], max_line_len=5) == "ab..."
def test_truncate_lines_dual_lines():
assert (
truncate_lines(
["abcdefghijklmnop", "bcdefghijklmnop"], max_line_len=5, max_lines=3
)
== "ab...\nbc..."
)
def test_truncate_lines_dual_lines_generator():
def gen():
yield "abcdefghijklmnop"
yield "bcdefghijklmnop"
assert truncate_lines(gen(), max_line_len=5, max_lines=3) == "ab...\nbc..."
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/truncate_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/util/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for compiler_gym.util.commands."""
import subprocess
import pytest
from compiler_gym.util.commands import Popen, communicate
from tests.test_main import main
def test_communicate_timeout():
with pytest.raises(subprocess.TimeoutExpired):
with subprocess.Popen(["sleep", "60"]) as process:
communicate(process, timeout=1)
assert process.poll() is not None # Process is dead.
def test_popen():
with Popen(["echo"]) as process:
communicate(process, timeout=60)
assert process.poll() is not None # Process is dead.
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/commands_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:filesystem."""
from pathlib import Path
import pytest
from compiler_gym.util import filesystem
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common"]
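# As exercised below, atomic_file_write is assumed to yield a temporary path
# (or file object) in the same directory as the target; on leaving the context
# the written data appears at the target path and the temporary file is gone.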
def test_atomic_file_write_path(tmpwd: Path):
out = Path("a").resolve()
assert not out.is_file()
with filesystem.atomic_file_write(out) as tmp_out:
assert tmp_out != out
assert tmp_out.parent == out.parent
# Write to the temporary file as normal.
with open(tmp_out, "w") as f:
f.write("Hello!")
with open(out) as f:
assert f.read() == "Hello!"
assert not tmp_out.is_file()
def test_atomic_file_write_binary_io(tmpwd: Path):
out = Path("a").resolve()
with filesystem.atomic_file_write(out, fileobj=True) as f:
f.write("Hello!".encode("utf-8"))
with open(out) as f:
assert f.read() == "Hello!"
def test_atomic_file_write_text_io(tmpwd: Path):
out = Path("a").resolve()
with filesystem.atomic_file_write(out, fileobj=True, mode="w") as f:
f.write("Hello!")
with open(out) as f:
assert f.read() == "Hello!"
@pytest.mark.parametrize(
"path",
[
"/",
"/dev/null",
Path("/"),
Path("/dev/null"),
],
)
def test_not_is_in_memory(path):
assert not filesystem.is_in_memory(path)
@pytest.mark.parametrize(
"path",
[
"/dev/shm",
"/dev/shm/foo",
Path("/dev/shm"),
Path("/dev/shm/foo"),
],
)
def test_is_in_memory(path):
assert filesystem.is_in_memory(path)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/filesystem_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:temporary_working_directory."""
import os
import tempfile
from pathlib import Path
from compiler_gym.util.temporary_working_directory import temporary_working_directory
from tests.test_main import main
def test_temporary_working_directory_tempdir():
with temporary_working_directory() as cwdir:
# Suffix test rather than equality test because on macOS temporary
# directories can have a /private prefix.
assert os.getcwd().endswith(str(cwdir))
assert cwdir.is_dir()
assert not list(cwdir.iterdir())
(cwdir / "test").touch()
assert (cwdir / "test").is_file()
# Out of scope, the directory is removed.
assert not cwdir.is_dir()
def test_temporary_working_directory():
with tempfile.TemporaryDirectory() as d:
path = Path(d)
with temporary_working_directory(path) as cwdir:
assert path == cwdir
# Suffix test rather than equality test because on macOS temporary
# directories can have a /private prefix.
assert os.getcwd().endswith(str(path))
assert cwdir.is_dir()
assert not list(cwdir.iterdir())
(cwdir / "test").touch()
assert (cwdir / "test").is_file()
# Out of scope, the directory is preserved.
assert path.is_dir()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/temporary_working_directory_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:statistics."""
from pytest import approx
from compiler_gym.util.statistics import geometric_mean
from tests.test_main import main
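# As asserted below, geometric_mean returns 0 for an empty list and for any
# input containing zero or negative values.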
def test_geometric_mean_empty_list():
assert geometric_mean([]) == 0
def test_geometric_mean_zero_value():
assert geometric_mean([0, 1, 2]) == 0
def test_geometric_mean_negative():
assert geometric_mean([-1, 1, 2]) == 0
def test_geometric_mean_123():
assert geometric_mean([1, 2, 3]) == approx(1.8171205928321)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/statistics_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/util:minimize_trajectory."""
import logging
import sys
from typing import List
import pytest
from compiler_gym.util import minimize_trajectory as mt
from compiler_gym.util.gym_type_hints import ActionType
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
# Verbose logging for tests.
logging.basicConfig(level=logging.DEBUG)
class MockActionSpace:
"""A mock action space for use by MockEnv."""
def __init__(self, actions):
self.flags = {a: str(a) for a in set(actions)}
class MockValidationResult:
"""A mock validation result for use by MockEnv."""
def __init__(self, okay):
self._okay = okay
def okay(self):
return self._okay
class MockEnv:
"""A mock environment for testing trajectory minimization."""
def __init__(self, actions: List[ActionType], validate=lambda env: True):
self.original_trajectory = actions
self.actions = actions.copy()
self.validate = lambda: MockValidationResult(validate(self))
self.benchmark = "benchmark"
self.action_space = MockActionSpace(set(actions))
def reset(self, benchmark):
self.actions = []
assert benchmark == self.benchmark
def multistep(self, actions):
for action in actions:
assert action in self.original_trajectory
self.actions += actions
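# Mimic the (observation, reward, done, info) tuple returned by the gym
# step()/multistep() API.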
return None, None, False, {}
def make_hypothesis(val: int):
"""Create a hypothesis that checks if `val` is in actions."""
def hypothesis(env):
print("hypothesis?()", env.actions, val in env.actions, file=sys.stderr)
return val in env.actions
return hypothesis
@pytest.mark.parametrize("n", range(10))
def test_bisect_explicit_hypothesis(n: int):
"""Test that bisection chops off the tail."""
env = MockEnv(actions=list(range(10)))
list(mt.bisect_trajectory(env, make_hypothesis(n)))
assert env.actions == list(range(n + 1))
@pytest.mark.parametrize("n", range(10))
def test_bisect_implicit_hypothesis(n: int):
"""Test bisection again but using the implicit hypothesis that
env.validate() fails.
"""
env = MockEnv(
actions=list(range(10)), validate=lambda env: not make_hypothesis(n)(env)
)
list(mt.bisect_trajectory(env))
assert env.actions == list(range(n + 1))
@pytest.mark.parametrize("n", range(10))
def test_reverse_bisect(n: int):
"""Test that reverse bisection chops off the prefix."""
env = MockEnv(actions=list(range(10)))
list(mt.bisect_trajectory(env, make_hypothesis(n), reverse=True))
assert env.actions == list(range(n, 10))
def test_minimize_trajectory_iteratively():
"""Test that reverse bisection chops off the prefix."""
env = MockEnv(actions=list(range(10)))
minimized = [0, 3, 4, 5, 8, 9]
def hypothesis(env):
return all(x in env.actions for x in minimized)
list(mt.minimize_trajectory_iteratively(env, hypothesis))
assert env.actions == minimized
def test_minimize_trajectory_iteratively_no_effect():
"""Test that reverse bisection chops off the prefix."""
env = MockEnv(actions=list(range(10)))
minimized = list(range(10))
def hypothesis(env):
return env.actions == minimized
list(mt.minimize_trajectory_iteratively(env, hypothesis))
assert env.actions == minimized
def test_random_minimization():
"""Test that random minimization reduces trajectory."""
env = MockEnv(actions=list(range(10)))
minimized = [0, 1, 4]
def hypothesis(env):
return all(x in env.actions for x in minimized)
list(mt.random_minimization(env, hypothesis))
assert len(env.actions) <= 10
assert len(env.actions) >= len(minimized)
assert all(a in list(range(10)) for a in env.actions)
def test_random_minimization_no_effect():
"""Test random minimization when there's no improvement to be had."""
env = MockEnv(actions=list(range(10)))
minimized = list(range(10))
def hypothesis(env):
return env.actions == minimized
list(mt.random_minimization(env, hypothesis))
assert env.actions == minimized
def test_minimize_trajectory_iteratively_llvm_crc32(env):
"""Test trajectory minimization on a real environment."""
env.reset(benchmark="cbench-v1/crc32")
env.multistep(
[
env.action_space["-mem2reg"],
env.action_space["-gvn"],
env.action_space["-reg2mem"],
]
)
def hypothesis(env):
return (
env.action_space["-mem2reg"] in env.actions
and env.action_space["-reg2mem"] in env.actions
)
list(mt.minimize_trajectory_iteratively(env, hypothesis))
assert env.actions == [
env.action_space["-mem2reg"],
env.action_space["-reg2mem"],
]
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/minimize_trajectory_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import numpy as np
import compiler_gym.util.permutation as permutation
from tests.test_main import main
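# Round-trip check: converting a permutation to its index number and back
# should reproduce the original permutation.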
def test_permutation_number_mapping():
original_permutation = np.array([4, 3, 1, 5, 2, 6, 0], dtype=int)
permutation_number = permutation.convert_permutation_to_number(original_permutation)
mapped_permutation = permutation.convert_number_to_permutation(
n=permutation_number, permutation_size=len(original_permutation)
)
assert np.array_equal(original_permutation, mapped_permutation)
original_permutation2 = np.array([2, 0, 5, 1, 4, 6, 3], dtype=int)
permutation_number2 = permutation.convert_permutation_to_number(
original_permutation2
)
mapped_permutation2 = permutation.convert_number_to_permutation(
n=permutation_number2, permutation_size=len(original_permutation2)
)
np.testing.assert_array_equal(original_permutation2, mapped_permutation2)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/util/permutation_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for //compiler_gym/bin:manual_env."""
import re
import sys
from difflib import unified_diff
from io import StringIO
from random import seed
import pytest
from absl import app, flags
from compiler_gym.bin.manual_env import main
from compiler_gym.util.capture_output import capture_output
from tests.test_main import main as _test_main
FLAGS = flags.FLAGS
def io_check(input, output, rnd_seed=100):
"""Run the shell with the given input and check the output matches the
output regex"""
seed(rnd_seed)
old_stdin = sys.stdin
try:
with capture_output() as out:
try:
sys.stdin = StringIO(input)
main(["argv0", "--env=llvm-v0"])
except SystemExit:
pass # Expected behaviour is to call sys.exit().
print(out.stdout)
pattern = (
r"""Initialized environment in [0-9.mu]*s
Welcome to the CompilerGym Shell!
---------------------------------
Type help or \? for more information.
The 'tutorial' command will give a step by step guide.
"""
+ output
+ r"""
compiler_gym:[a-zA-Z0-9/-]+> Exiting
"""
)
# Strip the ANSI escape sequences used for formatting from the output.
ansi_escape = re.compile(r"\x1B(?:[@-Z\\-_]|\[[0-?]*[ -/]*[@-~])")
stdout = ansi_escape.sub("", out.stdout)
# Strip trailing whitespace from output.
stdout = "\n".join(n.rstrip() for n in stdout.split("\n"))
if not re.match(pattern, stdout):
# Create a diff between the expected regex and the actual output.
# Diffing a regex will create a lot of false positives, since any
# character groups or other expressions will be different, but it can
# still be helpful for tracking down the important differences.
diff = unified_diff(
pattern.split("\n"),
stdout.split("\n"),
fromfile="Expected output regex",
tofile="Actual output",
)
pytest.fail("\n".join(diff))
finally:
sys.stdin = old_stdin
def test_list_datasets():
FLAGS.unparse_flags()
io_check(
"""list_datasets""", r"""compiler_gym:cbench-v1/qsort> .*cbench-v[0-9]+.*"""
)
def test_list_benchmarks():
FLAGS.unparse_flags()
io_check(
"""list_benchmarks""",
r"""compiler_gym:cbench-v1/qsort> .*cbench-v[0-9]+/adpcm.*""",
)
def test_list_actions():
FLAGS.unparse_flags()
io_check(
"""list_actions""", r"""compiler_gym:cbench-v1/qsort> .*-adce.* -strip.*"""
)
def test_list_rewards():
FLAGS.unparse_flags()
io_check(
"""list_rewards""",
r"""compiler_gym:cbench-v1/qsort> .*IrInstructionCount.* TextSizeOz.*""",
)
def test_list_observations():
FLAGS.unparse_flags()
io_check(
"""list_observations""",
r"""compiler_gym:cbench-v1/qsort> Autophase, .*, TextSizeOz""",
)
def test_set_benchmark():
FLAGS.unparse_flags()
io_check(
"""set_benchmark cbench-v1/adpcm""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s""",
)
def test_actions_stack_back_stack():
FLAGS.unparse_flags()
io_check(
"""set_benchmark cbench-v1/adpcm
action -mem2reg -adce -adce
stack
back
stack""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Action -mem2reg
Action -adce
Action -adce
No effect
Actions -mem2reg -adce -adce in [0-9.mu]*s with reward 0.
compiler_gym:cbench-v[0-9]+/adpcm> Depth | Action | Effect | Done | Reward | Cumulative Reward
---------+----------+----------+--------+----------+---------------------
3 | -adce | False | False | - | 0
2 | -adce | True | False | - | 0
1 | -mem2reg | True | False | - | 0
0 | <init> | False | False | 0 | 0
compiler_gym:cbench-v[0-9]+/adpcm> Undid -adce in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Depth | Action | Effect | Done | Reward | Cumulative Reward
---------+----------+----------+--------+----------+---------------------
2 | -adce | True | False | - | 0
1 | -mem2reg | True | False | - | 0
0 | <init> | False | False | 0 | 0""",
)
def test_reward():
FLAGS.unparse_flags()
io_check(
"""set_benchmark cbench-v1/adpcm
set_default_reward IrInstructionCount
action -mem2reg
reward
reward IrInstructionCountNorm
stack""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Reward IrInstructionCount in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Action -mem2reg
Reward: 287.000000
Actions -mem2reg in [0-9.mu]*s with reward 287.0.
compiler_gym:cbench-v[0-9]+/adpcm> 0.000000
Reward IrInstructionCount in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> 0.506173
Reward IrInstructionCountNorm in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Depth | Action | Effect | Done | Reward | Cumulative Reward
---------+----------+----------+--------+----------+---------------------
1 | -mem2reg | True | False | 287 | 287
0 | <init> | False | False | 0 | 0
""",
)
def test_observation():
FLAGS.unparse_flags()
io_check(
"""set_benchmark cbench-v1/adpcm
set_default_observation IrInstructionCount
action -mem2reg
observation
observation IrInstructionCountOz
""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Observation IrInstructionCount in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Action -mem2reg
Observation: 280
Actions -mem2reg in [0-9.mu]*s with reward 0.
compiler_gym:cbench-v[0-9]+/adpcm> 280
Observation IrInstructionCount in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> 209
Observation IrInstructionCountOz in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> 209
Observation IrInstructionCountOz in [0-9.mu]*s""",
)
def test_try_all_actions():
FLAGS.unparse_flags()
io_check(
"""set_benchmark cbench-v1/adpcm
set_default_reward IrInstructionCount
try_all_actions""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Reward IrInstructionCount in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Action: -add-discriminators Reward: 0.000000
Action: -adce Reward: 1.000000
(.|\n)*
Got actions in [0-9.mu]*s
Action | Effect | Done | Reward
---------------------------------+----------+--------+---------
-mem2reg | True | False | 181
-sroa | True | False | 181
-newgvn | True | False | 74
-gvn | True | False | 72
(.|\n)*
-structurizecfg | True | False | -25
-bounds-checking | True | False | -60""",
)
def test_simplify_stack():
FLAGS.unparse_flags()
io_check(
"""set_benchmark cbench-v1/adpcm
set_default_reward IrInstructionCount
action -mem2reg -adce -adce
simplify_stack
stack""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Reward IrInstructionCount in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Action -mem2reg
Reward: 287.000000
Action -adce
Reward: 2.000000
Action -adce
Reward: 0.000000
No effect
Actions -mem2reg -adce -adce in [0-9.mu]*s with reward 289.0.
compiler_gym:cbench-v[0-9]+/adpcm>
compiler_gym:cbench-v[0-9]+/adpcm> Depth | Action | Effect | Done | Reward | Cumulative Reward
---------+----------+----------+--------+----------+---------------------
2 | -adce | True | False | 2 | 289
1 | -mem2reg | True | False | 287 | 287
0 | <init> | False | False | 0 | 0""",
)
def test_simplify_stack_no_reward():
FLAGS.unparse_flags()
io_check(
"""set_benchmark cbench-v1/adpcm
action -mem2reg -adce -adce
simplify_stack
stack""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Action -mem2reg
Action -adce
Action -adce
No effect
Actions -mem2reg -adce -adce in [0-9.mu]*s with reward 0.
compiler_gym:cbench-v[0-9]+/adpcm>
compiler_gym:cbench-v[0-9]+/adpcm> Depth | Action | Effect | Done | Reward | Cumulative Reward
---------+----------+----------+--------+----------+---------------------
2 | -adce | True | False | - | 0
1 | -mem2reg | True | False | - | 0
0 | <init> | False | False | 0 | 0""",
)
def test_hill_climb(monkeypatch):
FLAGS.unparse_flags()
i = 0
def incr():
nonlocal i
i += 1
return i
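# Replace random.randrange with a deterministic counter so that the hill climb
# always evaluates the same sequence of actions.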
monkeypatch.setattr("random.randrange", lambda _: incr())
io_check(
"""set_benchmark cbench-v1/adpcm
set_default_reward IrInstructionCount
hill_climb 2
stack""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Reward IrInstructionCount in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Step: 1 Action: -adce Reward: 1.000000 Accept: True
Step: 2 Action: -aggressive-instcombine Reward: 0.000000 Accept: False
Hill climb complete in [0-9.mu]*s. Accepted 1 of 2 steps for total reward of 1.0.
compiler_gym:cbench-v[0-9]+/adpcm> Depth | Action | Effect | Done | Reward | Cumulative Reward
---------+----------+----------+--------+----------+---------------------
1 | -adce | True | False | 1 | 1
0 | <init> | False | False | 0 | 0""",
)
def test_greedy():
FLAGS.unparse_flags()
io_check(
"""set_benchmark cbench-v1/adpcm
set_default_reward IrInstructionCount
greedy
stack""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Reward IrInstructionCount in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Action: -add-discriminators Reward: 0.000000
Action: -adce Reward: 1.000000
(.|\n)*
Action: -mem2reg Reward: 287.000000
(.|\n)*
Action: -mergereturn Reward: -1.000000
Step: 1 Selected action: -mem2reg Reward: 287.000000
Greedy 1 steps in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Depth | Action | Effect | Done | Reward | Cumulative Reward
---------+----------+----------+--------+----------+---------------------
1 | -mem2reg | True | False | 181 | 181
0 | <init> | False | False | 0 | 0""",
)
def test_actions_string():
FLAGS.unparse_flags()
io_check(
"""set_benchmark cbench-v1/adpcm
action -mem2reg -adce
commandline""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Action -mem2reg
Action -adce
Actions -mem2reg -adce in [0-9.mu]*s with reward 0.
compiler_gym:cbench-v[0-9]+/adpcm> \$ opt -mem2reg -adce input.bc -o output.bc""",
)
def test_reset():
FLAGS.unparse_flags()
io_check(
"""set_benchmark cbench-v1/adpcm
action -mem2reg -adce
reset
stack""",
r"""compiler_gym:cbench-v1/qsort> Reset benchmark://cbench-v[0-9]+/adpcm environment in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Action -mem2reg
Action -adce
Actions -mem2reg -adce in [0-9.mu]*s with reward 0.
compiler_gym:cbench-v[0-9]+/adpcm> Reset in [0-9.mu]*s
compiler_gym:cbench-v[0-9]+/adpcm> Depth | Action | Effect | Done | Reward | Cumulative Reward
---------+----------+----------+--------+----------+---------------------
0 | <init> | False | False | 0 | 0""",
)
def test_unrecognized_flags():
FLAGS.unparse_flags()
with pytest.raises(app.UsageError) as ctx:
main(["argv0", "unknown-option"])
assert str(ctx.value) == "Unknown command line arguments: ['unknown-option']"
def test_missing_required_flag():
FLAGS.unparse_flags()
with pytest.raises(app.UsageError) as ctx:
main(["argv0"])
assert str(ctx.value) == "--env must be set"
def test_ls_env():
FLAGS.unparse_flags()
with capture_output() as out:
try:
main(["argv0", "--ls_env"])
except SystemExit:
pass # Expected behaviour is to call sys.exit().
assert "llvm-" in out.stdout
if __name__ == "__main__":
_test_main()
|
CompilerGym-development
|
tests/bin/manual_env_bin_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/bin:validate."""
import tempfile
from io import StringIO
from pathlib import Path
from typing import List
import pytest
from compiler_gym.bin.validate import main
from compiler_gym.util.capture_output import capture_output
from tests.pytest_plugins.common import set_command_line_flags, skip_on_ci
from tests.test_main import main as _test_main
def test_okay_llvm_result(monkeypatch):
stdin = """
benchmark,reward,commandline,walltime
benchmark://cbench-v1/crc32,0,opt input.bc -o output.bc,0.3
""".strip()
set_command_line_flags(["argv0", "--env=llvm-ic-v0"])
monkeypatch.setattr("sys.stdin", StringIO(stdin))
with capture_output() as out:
main(["argv0", "-"])
assert "✅ cbench-v1/crc32 " in out.stdout
assert not out.stderr
def test_okay_llvm_result_file_input():
with tempfile.TemporaryDirectory() as d:
path = Path(d) / "test.csv"
with open(str(path), "w") as f:
f.write(
"""
benchmark,reward,commandline,walltime
benchmark://cbench-v1/crc32,0,opt input.bc -o output.bc,0.3
""".strip()
)
set_command_line_flags(["argv0", "--env=llvm-ic-v0"])
with capture_output() as out:
main(["argv0", str(path)])
assert "✅ cbench-v1/crc32 " in out.stdout
assert not out.stderr
def test_no_input(monkeypatch):
set_command_line_flags(["argv0", "--env=llvm-ic-v0"])
monkeypatch.setattr("sys.stdin", StringIO(""))
with capture_output() as out:
with pytest.raises(SystemExit):
main(["argv0", "-"])
assert "No inputs to validate" in out.stderr
def test_invalid_reward_llvm_result(monkeypatch):
stdin = """
benchmark,reward,commandline,walltime
benchmark://cbench-v1/crc32,0.5,opt input.bc -o output.bc,0.3
""".strip()
set_command_line_flags(["argv0", "--env=llvm-ic-v0"])
monkeypatch.setattr("sys.stdin", StringIO(stdin))
with capture_output() as out:
with pytest.raises(SystemExit):
main(["argv0", "-"])
assert (
"❌ cbench-v1/crc32 Expected reward 0.5 but received reward 0.0\n"
in out.stdout
)
assert not out.stderr
def test_invalid_csv_format(monkeypatch):
stdin = "invalid\ncsv\nformat"
set_command_line_flags(["argv0", "--env=llvm-ic-v0"])
monkeypatch.setattr("sys.stdin", StringIO(stdin))
with capture_output() as out:
with pytest.raises(SystemExit):
main(["argv0", "-"])
assert "Expected 4 columns in the first row of CSV" in out.stderr
@skip_on_ci
def test_multiple_valid_inputs(monkeypatch):
stdin = """
benchmark,reward,walltime,commandline
benchmark://cbench-v1/crc32,,0,opt input.bc -o output.bc
benchmark://cbench-v1/crc32,,0,opt input.bc -o output.bc
benchmark://cbench-v1/crc32,,0,opt input.bc -o output.bc
""".strip()
set_command_line_flags(["argv0", "--env=llvm-v0"])
monkeypatch.setattr("sys.stdin", StringIO(stdin))
with capture_output() as out:
main(["argv0", "-"])
assert not out.stderr
assert out.stdout.count("✅") == 3 # Every benchmark passed.
@skip_on_ci
@pytest.mark.parametrize(
"benchmarks",
[
[
"benchmark://cbench-v1/gsm",
"benchmark://cbench-v1/lame",
"benchmark://cbench-v1/stringsearch",
"benchmark://cbench-v1/ghostscript",
],
[
"benchmark://cbench-v1/qsort",
"benchmark://cbench-v1/sha",
"benchmark://cbench-v1/ispell",
"benchmark://cbench-v1/blowfish",
],
[
"benchmark://cbench-v1/adpcm",
"benchmark://cbench-v1/tiffdither",
"benchmark://cbench-v1/bzip2",
"benchmark://cbench-v1/stringsearch2",
],
[
"benchmark://cbench-v1/bitcount",
"benchmark://cbench-v1/jpeg-d",
"benchmark://cbench-v1/jpeg-c",
"benchmark://cbench-v1/dijkstra",
],
[
"benchmark://cbench-v1/rijndael",
"benchmark://cbench-v1/patricia",
"benchmark://cbench-v1/tiff2rgba",
"benchmark://cbench-v1/crc32",
],
[
"benchmark://cbench-v1/tiff2bw",
"benchmark://cbench-v1/tiffmedian",
"benchmark://cbench-v1/susan",
],
],
)
def test_validate_cbench_null_options(monkeypatch, benchmarks: List[str]):
stdin = "\n".join(
[
"benchmark,reward,walltime,commandline",
]
+ [f"{b},,0,opt input.bc -o output.bc" for b in benchmarks]
)
set_command_line_flags(["argv0", "--env=llvm-v0"])
monkeypatch.setattr("sys.stdin", StringIO(stdin))
with capture_output() as out:
main(["argv0", "-"])
assert not out.stderr
assert out.stdout.count("✅") == len(benchmarks) # Every benchmark passed.
if __name__ == "__main__":
_test_main()
|
CompilerGym-development
|
tests/bin/validate_bin_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/bin/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/bin:service."""
import sys
import gym
import pytest
from absl import flags
import compiler_gym
from compiler_gym.bin.service import print_service_capabilities
from compiler_gym.errors import EnvironmentNotSupported
from tests.test_main import main
@pytest.mark.parametrize("env_name", compiler_gym.COMPILER_GYM_ENVS)
@pytest.mark.xfail(
sys.platform == "darwin",
reason="github.com/facebookresearch/CompilerGym/issues/459",
)
def test_print_service_capabilities_smoke_test(env_name: str):
flags.FLAGS(["argv0"])
try:
with gym.make(env_name) as env:
print_service_capabilities(env)
except EnvironmentNotSupported:
pass # Environment not supported on this test platform.
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/bin/service_bin_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for compiler_gym.datasets.uri."""
from compiler_gym.datasets import BenchmarkUri
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common"]
def test_from_string_1():
uri = BenchmarkUri.from_string("benchmark://test-v0")
assert uri.scheme == "benchmark"
assert uri.dataset == "test-v0"
assert uri.path == ""
assert uri.params == {}
assert uri.fragment == ""
assert str(uri) == "benchmark://test-v0"
def test_from_string_2():
uri = BenchmarkUri.from_string("test-v0")
assert uri.scheme == "benchmark"
assert uri.dataset == "test-v0"
assert uri.path == ""
assert uri.params == {}
assert uri.fragment == ""
assert str(uri) == "benchmark://test-v0"
def test_from_string_3():
uri = BenchmarkUri.from_string("benchmark://test-v0")
assert uri.scheme == "benchmark"
assert uri.dataset == "test-v0"
assert uri.path == ""
assert uri.params == {}
assert uri.fragment == ""
assert str(uri) == "benchmark://test-v0"
def test_from_string_4():
uri = BenchmarkUri.from_string(
"generator://csmith-v0/this path has whitespace/in/it"
)
assert uri.scheme == "generator"
assert uri.dataset == "csmith-v0"
assert uri.path == "/this path has whitespace/in/it"
assert uri.params == {}
assert uri.fragment == ""
assert str(uri) == "generator://csmith-v0/this path has whitespace/in/it"
def test_from_string_5():
uri = BenchmarkUri.from_string("generator://csmith-v0/0")
assert uri.scheme == "generator"
assert uri.dataset == "csmith-v0"
assert uri.path == "/0"
assert uri.params == {}
assert uri.fragment == ""
assert str(uri) == "generator://csmith-v0/0"
def test_from_string_6():
uri = BenchmarkUri.from_string("generator://csmith-v0?a=b&c=d#foo")
assert uri.scheme == "generator"
assert uri.dataset == "csmith-v0"
assert uri.path == ""
assert uri.params == {"a": ["b"], "c": ["d"]}
assert uri.fragment == "foo"
assert str(uri) == "generator://csmith-v0?a=b&c=d#foo"
def test_from_string_7():
uri = BenchmarkUri.from_string("")
assert uri.scheme == "benchmark"
assert uri.dataset == ""
assert uri.path == ""
assert uri.params == {}
assert uri.fragment == ""
assert str(uri) == "benchmark:"
def test_from_string_8():
uri = BenchmarkUri.from_string("generator:")
assert uri.scheme == "generator"
assert uri.dataset == ""
assert uri.path == ""
assert uri.params == {}
assert uri.fragment == ""
assert str(uri) == "generator:"
def test_canonicalize_1():
assert BenchmarkUri.canonicalize("test-v0") == "benchmark://test-v0"
def test_startswith():
uri = BenchmarkUri.from_string("benchmark://test-v0/foo")
assert not uri.startswith("!!!")
assert uri.startswith("b")
assert uri.startswith("benchmark://test-v0/fo")
def test_endswith():
uri = BenchmarkUri.from_string("benchmark://test-v0/foo")
assert not uri.endswith("!!!")
assert uri.endswith("o")
assert uri.endswith("mark://test-v0/foo")
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/datasets/uri_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/datasets:files_dataset_test."""
import tempfile
from pathlib import Path
import numpy as np
import pytest
from compiler_gym.datasets import FilesDataset
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common"]
@pytest.fixture(scope="function")
def empty_dataset() -> FilesDataset:
with tempfile.TemporaryDirectory() as d:
yield FilesDataset(
name="benchmark://test-v0",
description="",
license="MIT",
dataset_root=Path(d) / "files",
site_data_base=Path(d) / "site_data",
)
@pytest.fixture(scope="function", params=["", "memoized-ids"])
def populated_dataset(request) -> FilesDataset:
with tempfile.TemporaryDirectory() as d:
df = Path(d) / "files"
(df / "a").mkdir(parents=True)
(df / "b").mkdir()
with open(df / "e.txt", "w") as f:
f.write("e")
(df / "f.txt").touch()
(df / "g.jpg").touch()
(df / "a" / "a.txt").touch()
(df / "a" / "b.txt").touch()
(df / "b" / "a.txt").touch()
(df / "b" / "b.txt").touch()
(df / "b" / "c.txt").touch()
(df / "b" / "d.jpg").touch()
yield FilesDataset(
name="benchmark://test-v0",
description="",
license="MIT",
dataset_root=Path(d) / "files",
site_data_base=Path(d) / "site_data",
memoize_uris=request.param == "memoized-ids",
)
def test_dataset_is_installed(empty_dataset: FilesDataset):
assert empty_dataset.installed
def test_empty_dataset(empty_dataset: FilesDataset):
assert empty_dataset.size == 0
assert list(empty_dataset.benchmark_uris()) == []
assert list(empty_dataset.benchmarks()) == []
def test_populated_dataset(populated_dataset: FilesDataset):
for _ in range(2):
assert list(populated_dataset.benchmark_uris()) == [
"benchmark://test-v0/e.txt",
"benchmark://test-v0/f.txt",
"benchmark://test-v0/g.jpg",
"benchmark://test-v0/a/a.txt",
"benchmark://test-v0/a/b.txt",
"benchmark://test-v0/b/a.txt",
"benchmark://test-v0/b/b.txt",
"benchmark://test-v0/b/c.txt",
"benchmark://test-v0/b/d.jpg",
]
assert populated_dataset.size == 9
def test_populated_dataset_benchmark_lookup(populated_dataset: FilesDataset):
bm = populated_dataset.benchmark("benchmark://test-v0/e.txt")
assert bm.uri == "benchmark://test-v0/e.txt"
assert bm.proto.uri == "benchmark://test-v0/e.txt"
assert bm.proto.program.contents.decode("utf-8") == "e"
def test_populated_dataset_first_file(populated_dataset: FilesDataset):
bm = next(populated_dataset.benchmarks())
assert bm.uri == "benchmark://test-v0/e.txt"
assert bm.proto.uri == "benchmark://test-v0/e.txt"
assert bm.proto.program.contents.decode("utf-8") == "e"
def test_populated_dataset_benchmark_lookup_not_found(populated_dataset: FilesDataset):
with pytest.raises(
LookupError, match=r"^Benchmark not found: benchmark://test-v0/not/a/file"
):
populated_dataset.benchmark("benchmark://test-v0/not/a/file")
def test_populated_dataset_with_file_extension_filter(populated_dataset: FilesDataset):
populated_dataset.benchmark_file_suffix = ".jpg"
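    # Setting a suffix filter also strips the suffix from the enumerated URIs,
    # so "g.jpg" is reported as "benchmark://test-v0/g".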
assert list(populated_dataset.benchmark_uris()) == [
"benchmark://test-v0/g",
"benchmark://test-v0/b/d",
]
assert populated_dataset.size == 2
def test_populated_dataset_random_benchmark(populated_dataset: FilesDataset):
num_benchmarks = 3
rng = np.random.default_rng(0)
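    # With the fixed seed, the three draws below return three distinct benchmarks.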
random_benchmarks = {
b.uri
for b in (
populated_dataset.random_benchmark(rng) for _ in range(num_benchmarks)
)
}
assert len(random_benchmarks) == num_benchmarks
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/datasets/files_dataset_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/datasets/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/datasets."""
from pathlib import Path
import numpy as np
import pytest
from compiler_gym.datasets.datasets import Datasets, round_robin_iterables
from compiler_gym.datasets.uri import BenchmarkUri
from compiler_gym.service.proto import Benchmark as BenchmarkProto
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common"]
class MockDataset:
"""A mock Dataset class."""
def __init__(self, name):
self.name = name
self.installed = False
self.deprecated = False
self.benchmark_values = []
self.sort_order = 0
def install(self):
self.installed = True
def uninstall(self):
self.installed = False
def benchmark_uris(self):
return (b.uri for b in self.benchmark_values)
def benchmarks(self):
yield from self.benchmark_values
def benchmark_from_parsed_uri(self, uri: BenchmarkUri):
for b in self.benchmark_values:
if b.uri == str(uri):
return b
raise KeyError(str(uri))
def random_benchmark(self, random_state=None):
return random_state.choice(self.benchmark_values)
def __repr__(self):
return str(self.name)
class MockBenchmark:
"""A mock Benchmark class."""
def __init__(self, uri):
self.uri = uri
def __repr__(self):
return str(self.uri)
def test_enumerate_datasets_empty():
datasets = Datasets([])
assert list(datasets) == []
def test_enumerate_datasets():
da = MockDataset("benchmark://a")
db = MockDataset("benchmark://b")
datasets = Datasets((da, db))
assert list(datasets) == [da, db]
def test_enumerate_datasets_with_custom_sort_order():
da = MockDataset("benchmark://a")
db = MockDataset("benchmark://b")
db.sort_order = -1
datasets = Datasets((da, db))
assert list(datasets) == [db, da]
def test_enumerate_deprecated_datasets():
da = MockDataset("benchmark://a")
db = MockDataset("benchmark://b")
datasets = Datasets((da, db))
db.deprecated = True
assert list(datasets) == [da]
assert list(datasets.datasets(with_deprecated=True)) == [da, db]
def test_enumerate_datasets_deprecated_at_construction_time():
da = MockDataset("benchmark://a")
db = MockDataset("benchmark://b")
db.deprecated = True
datasets = Datasets((da, db))
assert list(datasets) == [da]
assert list(datasets.datasets(with_deprecated=True)) == [da, db]
def test_datasets_add_dataset():
datasets = Datasets([])
da = MockDataset("benchmark://foo-v0")
datasets["benchmark://foo-v0"] = da
assert list(datasets) == [da]
def test_datasets_add_deprecated_dataset():
datasets = Datasets([])
da = MockDataset("benchmark://a")
da.deprecated = True
datasets["benchmark://foo-v0"] = da
assert list(datasets) == []
def test_datasets_remove():
da = MockDataset("benchmark://foo-v0")
datasets = Datasets([da])
del datasets["benchmark://foo-v0"]
assert list(datasets) == []
def test_datasets_get_item():
da = MockDataset("benchmark://foo-v0")
datasets = Datasets([da])
assert datasets.dataset("benchmark://foo-v0") == da
assert datasets["benchmark://foo-v0"] == da
def test_datasets_contains():
da = MockDataset("benchmark://foo-v0")
datasets = Datasets([da])
assert "benchmark://foo-v0" in datasets
def test_datasets_get_item_default_scheme():
da = MockDataset("benchmark://foo-v0")
datasets = Datasets([da])
assert datasets.dataset("foo-v0") == da
assert datasets["foo-v0"] == da
def test_datasets_get_item_lookup_miss():
da = MockDataset("benchmark://foo-v0")
datasets = Datasets([da])
with pytest.raises(LookupError, match=r"^Dataset not found: benchmark://bar-v0$"):
datasets.dataset("benchmark://bar-v0")
with pytest.raises(LookupError, match=r"^Dataset not found: benchmark://bar-v0$"):
_ = datasets["benchmark://bar-v0"]
def test_datasets_contains_lookup_miss():
da = MockDataset("benchmark://foo-v0")
datasets = Datasets([da])
assert "benchmark://bar-v0" not in datasets
def test_benchmark_lookup_by_uri():
da = MockDataset("benchmark://foo-v0")
db = MockDataset("benchmark://bar-v0")
ba = MockBenchmark(uri="benchmark://foo-v0/abc")
da.benchmark_values.append(ba)
datasets = Datasets([da, db])
assert datasets.benchmark("benchmark://foo-v0/abc") == ba
def test_round_robin():
iters = iter(
[
iter([0, 1, 2, 3, 4, 5]),
iter(["a", "b", "c"]),
iter([0.5, 1.0]),
]
)
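    # round_robin_iterables() yields one item from each iterator per pass,
    # dropping iterators as they are exhausted.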
assert list(round_robin_iterables(iters)) == [
0,
"a",
0.5,
1,
"b",
1.0,
2,
"c",
3,
4,
5,
]
def test_benchmark_uris_order():
da = MockDataset("benchmark://foo-v0")
db = MockDataset("benchmark://bar-v0")
ba = MockBenchmark(uri="benchmark://foo-v0/abc")
bb = MockBenchmark(uri="benchmark://foo-v0/123")
bc = MockBenchmark(uri="benchmark://bar-v0/abc")
bd = MockBenchmark(uri="benchmark://bar-v0/123")
da.benchmark_values.append(ba)
da.benchmark_values.append(bb)
db.benchmark_values.append(bc)
db.benchmark_values.append(bd)
datasets = Datasets([da, db])
assert list(datasets.benchmark_uris()) == [b.uri for b in datasets.benchmarks()]
# Datasets are ordered by name, so bar-v0 before foo-v0.
assert list(datasets.benchmark_uris()) == [
"benchmark://bar-v0/abc",
"benchmark://foo-v0/abc",
"benchmark://bar-v0/123",
"benchmark://foo-v0/123",
]
def test_benchmarks_iter_deprecated():
da = MockDataset("benchmark://foo-v0")
db = MockDataset("benchmark://bar-v0")
db.deprecated = True
ba = MockBenchmark(uri="benchmark://foo-v0/abc")
bb = MockBenchmark(uri="benchmark://foo-v0/123")
bc = MockBenchmark(uri="benchmark://bar-v0/abc")
bd = MockBenchmark(uri="benchmark://bar-v0/123")
da.benchmark_values.append(ba)
da.benchmark_values.append(bb)
db.benchmark_values.append(bc)
db.benchmark_values.append(bd)
datasets = Datasets([da, db])
# Iterate over the benchmarks. The deprecated dataset is not included.
assert list(datasets.benchmark_uris()) == [b.uri for b in datasets.benchmarks()]
assert list(datasets.benchmark_uris()) == [
"benchmark://foo-v0/abc",
"benchmark://foo-v0/123",
]
# Repeat the above, but include the deprecated datasets.
assert list(datasets.benchmark_uris(with_deprecated=True)) == [
b.uri for b in datasets.benchmarks(with_deprecated=True)
]
assert list(datasets.benchmark_uris(with_deprecated=True)) == [
"benchmark://bar-v0/abc",
"benchmark://foo-v0/abc",
"benchmark://bar-v0/123",
"benchmark://foo-v0/123",
]
@pytest.mark.parametrize("weighted", [False, True])
def test_random_benchmark(mocker, weighted: bool):
da = MockDataset("benchmark://foo-v0")
da.size = 10
ba = MockBenchmark(uri="benchmark://foo-v0/abc")
da.benchmark_values.append(ba)
datasets = Datasets([da])
mocker.spy(da, "random_benchmark")
num_benchmarks = 5
rng = np.random.default_rng(0)
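    # The dataset holds a single benchmark, so every draw returns the same URI.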
random_benchmarks = {
b.uri
for b in (
datasets.random_benchmark(rng, weighted=weighted)
for _ in range(num_benchmarks)
)
}
assert da.random_benchmark.call_count == num_benchmarks
assert len(random_benchmarks) == 1
assert next(iter(random_benchmarks)) == "benchmark://foo-v0/abc"
def test_dataset_proto_scheme(tmpdir):
"""Test the proto:// scheme handler."""
tmpdir = Path(tmpdir)
datasets = Datasets(datasets={})
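    # The proto:// scheme deserializes a Benchmark message from the given file.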
proto = BenchmarkProto(uri="hello world")
with open(tmpdir / "file.pb", "wb") as f:
f.write(proto.SerializeToString())
benchmark = datasets.benchmark(f"proto://{tmpdir}/file.pb")
assert benchmark.proto.uri == "hello world"
assert benchmark.uri == "benchmark://hello world"
def test_dataset_proto_scheme_file_not_found(tmpdir):
tmpdir = Path(tmpdir)
datasets = Datasets(datasets={})
with pytest.raises(FileNotFoundError):
datasets.benchmark(f"proto://{tmpdir}/not_a_file")
def test_dataset_file_scheme(tmpdir):
"""Test the file:// scheme handler."""
tmpdir = Path(tmpdir)
datasets = Datasets(datasets={})
with open(tmpdir / "file.dat", "w") as f:
f.write("hello, world")
benchmark = datasets.benchmark(f"file://{tmpdir}/file.dat")
assert benchmark.proto.uri == f"file://{tmpdir}/file.dat"
assert benchmark.proto.program.contents == b"hello, world"
assert benchmark.uri == f"file://{tmpdir}/file.dat"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/datasets/datasets_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/datasets."""
from pathlib import Path
import pytest
from compiler_gym.datasets.dataset import Dataset
from compiler_gym.datasets.uri import BenchmarkUri
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common"]
# pylint: disable=abstract-method
def test_dataset_properties():
"""Test the dataset property values."""
dataset = Dataset(
name="benchmark://test-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
)
assert dataset.name == "benchmark://test-v0"
assert dataset.scheme == "benchmark"
assert dataset.description == "A test dataset"
assert dataset.license == "MIT"
def test_dataset_optional_properties():
"""Test the default values of optional dataset properties."""
dataset = Dataset(
name="benchmark://test-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
)
assert dataset.references == {} # Default value.
assert not dataset.deprecated
assert dataset.sort_order == 0
assert dataset.validatable == "No"
def test_dataset_default_version():
"""Test the dataset property values."""
dataset = Dataset(
name="benchmark://test",
description="A test dataset",
license="MIT",
site_data_base="test",
)
assert dataset.name == "benchmark://test"
assert dataset.scheme == "benchmark"
assert dataset.version == 0
def test_dataset_optional_properties_explicit_values():
"""Test the non-default values of optional dataset properties."""
dataset = Dataset(
name="benchmark://test-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
references={"GitHub": "https://github.com/facebookresearch/CompilerGym"},
deprecated="Deprecation message",
sort_order=10,
validatable="Yes",
)
assert dataset.references == {
"GitHub": "https://github.com/facebookresearch/CompilerGym"
}
assert dataset.deprecated
assert dataset.sort_order == 10
assert dataset.validatable == "Yes"
def test_dataset_inferred_properties():
"""Test the values of inferred dataset properties."""
dataset = Dataset(
name="benchmark://test-v2",
description="A test dataset",
license="MIT",
site_data_base="test",
)
assert dataset.scheme == "benchmark"
assert dataset.version == 2
def test_dataset_properties_read_only(tmpwd: Path):
"""Test that dataset properties are read-only."""
dataset = Dataset(
name="benchmark://test-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
)
with pytest.raises(AttributeError):
dataset.name = "benchmark://test-v1"
with pytest.raises(AttributeError):
dataset.description = "A test dataset"
with pytest.raises(AttributeError):
dataset.license = "MIT"
with pytest.raises(AttributeError):
dataset.site_data_path = tmpwd
def test_dataset_site_data_directory(tmpwd: Path):
"""Test the path generated for site data."""
dataset = Dataset(
name="benchmark://test-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
)
# Use endswith() since tmpwd on macOS may have a '/private' prefix.
assert str(dataset.site_data_path).endswith(
str(tmpwd / "test" / "benchmark" / "test-v0")
)
assert not dataset.site_data_path.is_dir() # Dir is not created until needed.
def test_dataset_deprecation_message(tmpwd: Path):
"""Test that a deprecation warning is emitted on install()."""
dataset = Dataset(
name="benchmark://test-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
deprecated="The cat sat on the mat",
)
with pytest.warns(DeprecationWarning, match="The cat sat on the mat"):
dataset.install()
def test_dataset_equality_and_sorting():
"""Test comparison operators between datasets."""
a = Dataset(
name="benchmark://a-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
)
a2 = Dataset(
name="benchmark://a-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
)
b = Dataset(
name="benchmark://b-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
)
assert a == a2
assert a != b
assert a < b
assert a <= b
assert b > a
assert b >= a
# String comparisons
assert a == "benchmark://a-v0"
assert a != "benchmark://b-v0"
assert a < "benchmark://b-v0"
# Sorting
assert sorted([a2, b, a]) == [
"benchmark://a-v0",
"benchmark://a-v0",
"benchmark://b-v0",
]
class DatasetForTesting(Dataset):
"""A dataset to use for testing."""
def __init__(self, benchmarks=None):
super().__init__(
name="benchmark://test-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
)
self._benchmarks = benchmarks or {
"benchmark://test-v0/a": 1,
"benchmark://test-v0/b": 2,
"benchmark://test-v0/c": 3,
}
def benchmark_uris(self):
return sorted(self._benchmarks)
def benchmark_from_parsed_uri(self, uri: BenchmarkUri):
return self._benchmarks[str(uri)]
@property
def size(self):
return len(self._benchmarks)
def test_dataset_size():
dataset = DatasetForTesting()
assert dataset.size == 3
assert len(dataset) == 3
def test_benchmarks_lookup_by_uri():
dataset = DatasetForTesting()
assert dataset.benchmark("benchmark://test-v0/b") == 2
assert dataset["benchmark://test-v0/b"] == 2
def test_benchmarks_iter():
dataset = DatasetForTesting()
assert list(dataset.benchmarks()) == [1, 2, 3]
assert list(dataset) == [1, 2, 3]
def test_with_site_data():
"""Test the dataset property values."""
dataset = Dataset(
name="benchmark://test-v0",
description="A test dataset",
license="MIT",
site_data_base="test",
)
assert dataset.has_site_data
def test_without_site_data():
"""Test the dataset property values."""
dataset = Dataset(
name="benchmark://test-v0",
description="A test dataset",
license="MIT",
)
assert not dataset.has_site_data
with pytest.raises(
ValueError, match=r"^Dataset has no site data path: benchmark://test-v0$"
):
dataset.site_data_path # noqa
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/datasets/dataset_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/datasets:benchmark."""
from pathlib import Path
import pytest
from compiler_gym.datasets import Benchmark, BenchmarkSource
from compiler_gym.errors import ValidationError
from compiler_gym.service.proto import Benchmark as BenchmarkProto
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common"]
def test_benchmark_attribute_outside_init():
"""Test that new attributes can be added to Benchmark."""
benchmark = Benchmark(None)
benchmark.foobar = 123 # pylint: disable=attribute-defined-outside-init
assert benchmark.foobar == 123
def test_benchmark_subclass_attribute_outside_init():
"""Test that new attributes can be added to Benchmark subclass."""
class TestBenchmark(Benchmark):
pass
benchmark = TestBenchmark(None)
benchmark.foobar = 123 # pylint: disable=attribute-defined-outside-init
assert benchmark.foobar == 123
def test_benchmark_properties():
"""Test benchmark properties."""
benchmark = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foobar"))
assert benchmark.uri == "benchmark://example-compiler-v0/foobar"
assert benchmark.proto == BenchmarkProto(
uri="benchmark://example-compiler-v0/foobar"
)
def test_benchmark_immutable():
"""Test that benchmark properties are immutable."""
benchmark = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foobar"))
with pytest.raises(AttributeError):
benchmark.uri = 123
with pytest.raises(AttributeError):
benchmark.proto = 123
def test_add_validation_callbacks_values():
"""Test methods for adding and checking custom validation callbacks."""
def a(env):
pass
benchmark = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foobar"))
assert benchmark.validation_callbacks() == []
assert not benchmark.is_validatable()
benchmark.add_validation_callback(a)
assert benchmark.validation_callbacks() == [a]
assert benchmark.is_validatable()
benchmark.add_validation_callback(a)
assert benchmark.validation_callbacks() == [a, a]
def test_add_validation_callbacks_call_count():
"""Test that custom validation callbacks are called on validate()."""
a_call_count = 0
b_call_count = 0
def a(env):
nonlocal a_call_count
a_call_count += 1
def b(env):
nonlocal b_call_count
b_call_count += 1
benchmark = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foobar"))
benchmark.add_validation_callback(a)
errors = benchmark.validate(env=None)
assert errors == []
assert a_call_count == 1
assert b_call_count == 0
benchmark.add_validation_callback(b)
errors = benchmark.validate(env=None)
assert errors == []
assert a_call_count == 2
assert b_call_count == 1
def test_validation_callback_error():
"""Test error propagation from custom validation callback."""
def a(env):
yield ValidationError(type="Compilation Error")
yield ValidationError(type="Runtime Error")
benchmark = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foobar"))
benchmark.add_validation_callback(a)
errors = benchmark.validate(env=None)
assert errors == [
ValidationError(type="Compilation Error"),
ValidationError(type="Runtime Error"),
]
def test_validation_callback_error_iter():
"""Test error propagation from custom validation callback using iterable."""
def a(env):
yield ValidationError(type="Compilation Error")
yield ValidationError(type="Runtime Error")
benchmark = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foobar"))
benchmark.add_validation_callback(a)
errors = benchmark.ivalidate(env=None)
next(errors) == ValidationError(type="Compilation Error")
next(errors) == ValidationError(type="Runtime Error")
def test_validation_callback_flaky():
"""Test error propagation on callback which *may* fail."""
flaky = False
def a(env):
nonlocal flaky
del env
if flaky:
yield ValidationError(type="Runtime Error")
benchmark = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foobar"))
benchmark.add_validation_callback(a)
errors = benchmark.validate(env=None)
assert errors == []
flaky = True
errors = benchmark.validate(env=None)
assert errors == [
ValidationError(type="Runtime Error"),
]
def test_eq_benchmarks():
a = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foo"))
b = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foo"))
assert a == b
def test_eq_strings():
a = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foo"))
b = "benchmark://example-compiler-v0/foo"
assert a == b
def test_ne_benchmarks():
a = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foo"))
b = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/bar"))
assert a != b
def test_ne_strings():
a = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/foo"))
b = "benchmark://example-compiler-v0/bar"
assert a != b
def test_benchmark_sources(tmpwd: Path):
a = Benchmark(
BenchmarkProto(uri="benchmark://example-compiler-v0/foo"),
sources=[("example.py", "Hello, world!".encode("utf-8"))],
)
a.add_source(BenchmarkSource(filename="foo.py", contents="Hi".encode("utf-8")))
assert list(a.sources) == [
BenchmarkSource("example.py", "Hello, world!".encode("utf-8")),
BenchmarkSource(filename="foo.py", contents="Hi".encode("utf-8")),
]
a.write_sources_to_directory("benchmark_sources")
with open(tmpwd / "benchmark_sources" / "example.py") as f:
assert f.read() == "Hello, world!"
with open(tmpwd / "benchmark_sources" / "foo.py") as f:
assert f.read() == "Hi"
def test_benchmark_from_file(tmpwd: Path):
path = tmpwd / "foo.txt"
with open(path, "w") as f:
f.write("Hello, world!")
benchmark = Benchmark.from_file("benchmark://example-compiler-v0/foo", path)
assert benchmark.proto.program.contents.decode("utf-8") == "Hello, world!"
def test_benchmark_from_file_not_found(tmpwd: Path):
path = tmpwd / "foo.txt"
with pytest.raises(FileNotFoundError, match=str(path)):
Benchmark.from_file("benchmark://example-compiler-v0/foo", path)
def test_benchmark_equality_and_sorting():
    """Test comparison operators between benchmarks."""
a = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/a"))
a2 = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/a"))
b = Benchmark(BenchmarkProto(uri="benchmark://example-compiler-v0/b"))
assert a == a2
assert a != b
assert a < b
assert a <= b
assert b > a
assert b >= a
# String comparisons
assert a == "benchmark://example-compiler-v0/a"
assert a != "benchmark://example-compiler-v0/b"
assert a < "benchmark://example-compiler-v0/b"
# Sorting
assert sorted([a2, b, a]) == [
"benchmark://example-compiler-v0/a",
"benchmark://example-compiler-v0/a",
"benchmark://example-compiler-v0/b",
]
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/datasets/benchmark_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/spaces:scalar."""
from compiler_gym.spaces import Commandline, CommandlineFlag
from tests.test_main import main
def test_sample():
space = Commandline(
[
CommandlineFlag(name="a", flag="-a", description=""),
CommandlineFlag(name="b", flag="-b", description=""),
CommandlineFlag(name="c", flag="-c", description=""),
],
name="test",
)
assert space.sample() in {0, 1, 2}
def test_contains():
space = Commandline(
[
CommandlineFlag(name="a", flag="-a", description=""),
CommandlineFlag(name="b", flag="-b", description=""),
CommandlineFlag(name="c", flag="-c", description=""),
],
name="test",
)
assert space.contains(0)
assert space.contains(1)
assert space.contains(2)
assert not space.contains(-11)
assert not space.contains(1.5)
assert not space.contains(4)
def test_to_and_from_string():
space = Commandline(
[
CommandlineFlag(name="a", flag="-a", description=""),
CommandlineFlag(name="b", flag="-b", description=""),
CommandlineFlag(name="c", flag="-c", description=""),
],
name="test",
)
assert space.to_string([0, 1, 2]) == "-a -b -c"
assert space.from_string(space.to_string([0, 1, 2])) == [0, 1, 2]
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/commandline_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from gym.spaces import Discrete
from compiler_gym.spaces import Dict
from tests.test_main import main
def test_equal():
assert Dict({"a": Discrete(2), "b": Discrete(3)}, name="test_dict") == Dict(
{"a": Discrete(2), "b": Discrete(3)}, name="test_dict"
)
def test_not_equal():
dict_space = Dict({"a": Discrete(2), "b": Discrete(3)}, name="test_dict")
assert dict_space != Dict({"a": Discrete(2), "c": Discrete(3)}, name="test_dict")
assert dict_space != Dict({"a": Discrete(2)}, name="test_dict")
assert dict_space != Dict({"a": Discrete(2), "b": Discrete(3)}, name="test_dict_2")
assert dict_space != "not_a_dict"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/dict_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/spaces:sequence."""
from copy import deepcopy
import pytest
from compiler_gym.spaces import Scalar, Sequence, SpaceSequence
from tests.test_main import main
def test_sample():
space = Sequence(name="test", size_range=(0, None), dtype=int)
with pytest.raises(NotImplementedError):
space.sample()
def test_str_contains():
space = Sequence(name="test", size_range=(0, None), dtype=str)
assert space.contains("Hello, world!")
assert space.contains("")
assert not space.contains([1, 2, 3])
def test_str_contains_too_long():
space = Sequence(name="test", size_range=(0, 4), dtype=str)
assert not space.contains("Hello, world!")
assert space.contains("")
assert not space.contains([1, 2, 3])
def test_str_contains_too_short():
space = Sequence(name="test", size_range=(3, None), dtype=str)
assert space.contains("Hello, world!")
assert not space.contains("")
assert not space.contains([1, 2, 3])
def test_int_contains():
space = Sequence(name="test", size_range=(5, 5), dtype=int)
assert not space.contains(list(range(4)))
assert space.contains(list(range(5)))
assert not space.contains(list(range(6)))
def test_contains_with_float_scalar_range():
space = Sequence(
name="test",
size_range=(3, 3),
dtype=float,
scalar_range=Scalar(name="test", min=0, max=1, dtype=float),
)
assert space.contains([0.0, 0.0, 0.0])
assert space.contains([0.1, 1.0, 0.5])
assert not space.contains([0.0, 0.0, -1.0]) # out of bounds
assert not space.contains([0.0, 0, 0.1]) # wrong dtype
assert not space.contains([0.0, 0]) # wrong shape
def test_bytes_contains():
space = Sequence(name="test", size_range=(0, None), dtype=bytes)
assert space.contains(b"Hello, world!")
assert space.contains(b"")
assert not space.contains("Hello, world!")
def test_space_sequence_contains():
subspace = Scalar(name="subspace", min=0, max=1, dtype=float)
space_seq = SpaceSequence(name="seq", space=subspace, size_range=(0, 2))
assert space_seq.contains([0.5, 0.6])
assert not space_seq.contains(["not-a-number"])
assert not space_seq.contains([2.0])
assert not space_seq.contains([0.1, 0.2, 0.3])
def test_equal():
seq = Sequence(
name="seq",
size_range=[1, 2],
dtype=int,
opaque_data_format="fmt",
scalar_range=[3, 4],
)
assert seq == deepcopy(seq)
def test_not_equal():
seq = Sequence(
name="seq",
size_range=[1, 2],
dtype=int,
opaque_data_format="fmt",
scalar_range=[3, 4],
)
assert seq != Sequence(
name="seq2",
size_range=[1, 2],
dtype=int,
opaque_data_format="fmt",
scalar_range=[3, 4],
)
assert seq != Sequence(
name="seq",
size_range=[0, 2],
dtype=int,
opaque_data_format="fmt",
scalar_range=[3, 4],
)
assert seq != Sequence(
name="seq",
size_range=[1, 3],
dtype=int,
opaque_data_format="fmt",
scalar_range=[3, 4],
)
assert seq != Sequence(
name="seq",
size_range=[1, 2],
dtype=float,
opaque_data_format="fmt",
scalar_range=[3, 4],
)
assert seq != Sequence(
name="seq",
size_range=[1, 2],
dtype=int,
opaque_data_format="fmt2",
scalar_range=[3, 4],
)
assert seq != Sequence(
name="seq",
size_range=[1, 2],
dtype=int,
opaque_data_format="fmt",
scalar_range=[0, 4],
)
assert seq != Sequence(
name="seq",
size_range=[1, 2],
dtype=int,
opaque_data_format="fmt",
scalar_range=[3, 5],
)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/sequence_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from gym.spaces import Discrete
from compiler_gym.spaces import Tuple
from tests.test_main import main
def test_equal():
assert Tuple([Discrete(2), Discrete(3)], name="test_tuple") == Tuple(
[Discrete(2), Discrete(3)], name="test_tuple"
)
def test_not_equal():
tuple_space = Tuple([Discrete(2), Discrete(3)], name="test_tuple")
assert tuple_space != Tuple([Discrete(3), Discrete(3)], name="test_tuple")
assert tuple_space != Tuple([Discrete(2)], name="test_tuple")
assert tuple_space != Tuple([Discrete(2), Discrete(3)], name="test_tuple_2")
assert tuple_space != "not_a_tuple"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/tuple_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for compiler_gym/spaces/action_space.py."""
from compiler_gym.spaces import ActionSpace, Discrete, NamedDiscrete
from tests.test_main import main
class MockActionSpace:
name = "mock"
foo = 1
def sample(self):
return 1
def seed(self, s):
pass
def contains(self, x):
pass
def __repr__(self) -> str:
return self.name
def test_action_space_forward(mocker):
a = MockActionSpace()
ma = ActionSpace(a)
assert ma.name == "mock"
assert ma.foo == 1
mocker.spy(a, "sample")
assert ma.sample() == 1
assert a.sample.call_count == 1
mocker.spy(a, "seed")
ma.seed(10)
assert a.seed.call_count == 1
mocker.spy(a, "contains")
10 in ma
assert a.contains.call_count == 1
def test_action_space_comparison():
a = MockActionSpace()
b = ActionSpace(a)
c = MockActionSpace()
assert b == a
assert b.wrapped == a
assert b != c
def test_action_space_default_string_conversion():
"""Test that to_string() and from_string() are forward to subclasses."""
a = Discrete(name="a", n=3)
ma = ActionSpace(a)
assert ma.to_string([0, 1, 0]) == "0,1,0"
assert ma.from_string("0,1,0") == [0, 1, 0]
def test_action_space_forward_string_conversion():
"""Test that to_string() and from_string() are forward to subclasses."""
a = NamedDiscrete(name="a", items=["a", "b", "c"])
ma = ActionSpace(a)
assert ma.to_string([0, 1, 2, 0]) == "a b c a"
assert ma.from_string("a b c a") == [0, 1, 2, 0]
def test_action_space_str():
ma = ActionSpace(MockActionSpace())
assert str(ma) == "ActionSpace(mock)"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/action_space_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/spaces/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/spaces:named_discrete."""
import pytest
from compiler_gym.spaces import NamedDiscrete
from tests.test_main import main
def test_empty_space():
with pytest.raises(ValueError, match="No values for discrete space"):
NamedDiscrete([], name="test")
def test_invalid_name_lookup():
space = NamedDiscrete(["foo"], name="test")
with pytest.raises(ValueError):
_ = space["bar"]
def test_space_size():
space = NamedDiscrete(["a", "b", "c"], name="test")
assert space.n == 3
def test_name_lookup():
space = NamedDiscrete(["a", "b", "c"], name="test")
assert space["a"] == 0
assert space["b"] == 1
assert space["c"] == 2
def test_repr():
space = NamedDiscrete(["foo", "bar"], name="test")
assert str(space) == "NamedDiscrete([foo, bar])"
def test_to_string():
space = NamedDiscrete(["foo", "bar"], name="test")
assert space.to_string(0) == "foo"
assert space.to_string([0]) == "foo"
assert space.to_string([0, 0, 1]) == "foo foo bar"
def test_equal():
assert NamedDiscrete(["a", "b"], name="test_named_discrete") == NamedDiscrete(
["a", "b"], name="test_named_discrete"
)
def test_not_equal():
named_discrete = NamedDiscrete(["a", "b"], name="test_named_discrete")
assert named_discrete != NamedDiscrete(["a", "bb"], name="test_named_discrete")
assert named_discrete != NamedDiscrete(["a", "b", "c"], name="test_named_discrete")
assert named_discrete != NamedDiscrete(["a", "b"], name="test_named_discrete_2")
assert named_discrete != "not_a_named_discrete"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/named_discrete_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for compiler_gym.spaces.Reward."""
from copy import deepcopy
from compiler_gym.spaces import Reward
from tests.test_main import main
def test_equal():
reward = Reward(
name="test_reward",
observation_spaces=["a", "b"],
default_value=5,
min=-10,
max=10,
default_negates_returns=True,
success_threshold=3,
deterministic=False,
platform_dependent=True,
)
assert reward == deepcopy(reward)
assert reward == "test_reward"
def test_not_equal():
reward = Reward(
name="test_reward",
observation_spaces=["a", "b"],
default_value=5,
min=-10,
max=10,
default_negates_returns=True,
success_threshold=3,
deterministic=False,
platform_dependent=True,
)
reward2 = deepcopy(reward)
reward2.name = "test_reward_2"
assert reward != reward2
assert reward != "test_reward_2"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/reward_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Unit tests for //compiler_gym/spaces:scalar."""
from copy import copy, deepcopy
from compiler_gym.spaces import Scalar
from tests.test_main import main
def test_sample():
space = Scalar(name="test", min=-10, max=10, dtype=int)
x = space.sample()
assert isinstance(x, int)
assert -10 <= x <= 10
def test_int_contains():
space = Scalar(name="test", min=-10, max=10, dtype=int)
assert space.contains(-10)
assert not space.contains(-11)
assert not space.contains(0.5)
def test_int_contains_no_upper_bound():
space = Scalar(name="test", min=0, max=None, dtype=int)
assert space.contains(0)
assert not space.contains(-1)
assert space.contains(1000)
def test_equality():
space_a = Scalar(name="test", min=0, max=None, dtype=int)
space_b = Scalar(name="test", min=0, max=None, dtype=int)
assert space_a == space_b
def test_dtype_inequality():
space_a = Scalar(name="test", min=0, max=None, dtype=int)
space_b = Scalar(name="test", min=0, max=None, dtype=float)
assert space_a != space_b
def test_upper_bound_inequality():
space_a = Scalar(name="test", min=0, max=None, dtype=int)
space_b = Scalar(name="test", min=0, max=5, dtype=int)
assert space_a != space_b
def test_lower_bound_inequality():
space_a = Scalar(name="test", min=0, max=None, dtype=int)
space_b = Scalar(name="test", min=None, max=None, dtype=int)
assert space_a != space_b
def test_equal():
assert Scalar(name="test_scalar", min=-10, max=10, dtype=int) == Scalar(
name="test_scalar", min=-10, max=10, dtype=int
)
def test_not_equal():
scalar = Scalar(name="test_scalar", min=-10, max=10, dtype=int)
assert scalar != Scalar(name="test_scalar_2", min=-10, max=10, dtype=int)
assert scalar != Scalar(name="test_scalar", min=-5, max=10, dtype=int)
assert scalar != Scalar(name="test_scalar", min=-10, max=5, dtype=int)
assert scalar != Scalar(name="test_scalar", min=-10, max=10, dtype=float)
assert scalar != "not_as_scalar"
def test_deepcopy_regression_test():
"""Test to reproduce github.com/facebookresearch/CompilerGym/issues/768."""
x = Scalar(name="foo")
copy(x)
deepcopy(x)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/scalar_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from compiler_gym.spaces import Box
from tests.test_main import main
def test_equal():
assert Box(low=0, high=1, name="test_box", shape=[1, 2], dtype=int) == Box(
low=0, high=1, name="test_box", shape=[1, 2], dtype=int
)
assert Box(low=0, high=1, name="test_box", shape=[1, 2], dtype=int) == Box(
low=0, high=1, name="test_box", shape=[1, 2], dtype=float
)
def test_not_equal():
box = Box(low=0, high=1, name="test_box", shape=[1, 2], dtype=int)
assert box != Box(low=0, high=1, name="test_box_2", shape=[1, 2], dtype=int)
assert box != Box(low=-1, high=1, name="test_box", shape=[1, 2], dtype=int)
assert box != Box(low=0, high=2, name="test_box", shape=[1, 2], dtype=int)
assert box != Box(low=0, high=1, name="test_box", shape=[1, 3], dtype=int)
assert box != "not_a_box"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/box_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
from compiler_gym.spaces import Discrete
from tests.test_main import main
def test_equal():
assert Discrete(2, name="test_discrete") == Discrete(2, name="test_discrete")
def test_not_equal():
discrete = Discrete(2, name="test_discrete")
assert discrete != Discrete(3, name="test_discrete")
assert discrete != Discrete(2, name="test_discrete_2")
assert discrete != "not_a_discrete"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/discrete_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import pytest
from compiler_gym.spaces import Permutation, Scalar
from tests.test_main import main
def test_invalid_scalar_range_dtype():
with pytest.raises(
TypeError, match="Permutation space can have integral scalar range only."
):
Permutation(name="", scalar_range=Scalar(name="", min=0, max=2, dtype=float))
def test_equal():
assert Permutation(
name="perm", scalar_range=Scalar(name="range", min=0, max=2, dtype=int)
) == Permutation(
name="perm", scalar_range=Scalar(name="range", min=0, max=2, dtype=int)
)
def test_not_equal():
permutation = Permutation(
name="perm", scalar_range=Scalar(name="range", min=0, max=2, dtype=int)
)
assert permutation != Permutation(
name="perm", scalar_range=Scalar(name="range", min=0, max=1, dtype=int)
)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/spaces/permutation_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
from enum import Enum
from io import StringIO
from pathlib import Path
from typing import List
import gym
import pytest
from flaky import flaky
import compiler_gym
from compiler_gym.compiler_env_state import (
CompilerEnvStateReader,
CompilerEnvStateWriter,
)
from compiler_gym.envs import CompilerEnv, llvm
from compiler_gym.envs.llvm.llvm_env import LlvmEnv
from compiler_gym.errors import ServiceError
from compiler_gym.service.connection import CompilerGymServiceConnection
from tests.pytest_plugins import llvm as llvm_plugin
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.llvm"]
@pytest.fixture(scope="function", params=["local", "service"])
def env(request) -> CompilerEnv:
"""Create an LLVM environment."""
if request.param == "local":
with gym.make("llvm-v0") as env:
yield env
else:
service = CompilerGymServiceConnection(llvm.LLVM_SERVICE_BINARY)
try:
with LlvmEnv(service=service.connection.url) as env:
yield env
finally:
service.close()
def test_service_version(env: LlvmEnv):
assert env.version == compiler_gym.__version__
def test_compiler_version(env: LlvmEnv):
assert env.compiler_version.startswith("10.0.0")
def test_action_space_names(env: LlvmEnv, action_names: List[str]):
assert set(env.action_space.names) == set(action_names)
def test_action_spaces_names(env: LlvmEnv):
assert {a.name for a in env.action_spaces} == {"PassesAll"}
def test_all_flags_are_unique(env: LlvmEnv):
assert sorted(env.action_space.flags) == sorted(set(env.action_space.flags))
@pytest.mark.parametrize("benchmark_name", llvm_plugin.BENCHMARK_NAMES)
def test_benchmark_names(env: LlvmEnv, benchmark_name: str):
"""Check that all benchmark names can be found in the datasets."""
assert env.datasets.benchmark(benchmark_name)
@pytest.mark.parametrize("always_send_benchmark_on_reset", [False, True])
def test_double_reset(env: LlvmEnv, always_send_benchmark_on_reset: bool):
env.service.opts.always_send_benchmark_on_reset = always_send_benchmark_on_reset
env.reset(benchmark="cbench-v1/crc32")
env.reset(benchmark="cbench-v1/crc32")
assert env.in_episode
@flaky
def test_connection_dies_default_reward(env: LlvmEnv):
env.reward_space = "IrInstructionCount"
env.reset(benchmark="cbench-v1/crc32")
env.reward_space.default_negates_returns = False
env.reward_space.default_value = 2.5
env.episode_reward = 10
# Kill the service. Note killing the service for a ManagedConnection will
# result in a ServiceError because we have not ended the session we started
# with env.reset() above. For UnmanagedConnection, this error will not be
# raised.
try:
env.service.close()
except ServiceError as e:
assert "Service exited with returncode " in str(e)
_, reward, done, _ = env.step(0)
assert done
assert reward == 2.5
@flaky
def test_connection_dies_default_reward_negated(env: LlvmEnv):
env.reward_space = "IrInstructionCount"
env.reset(benchmark="cbench-v1/crc32")
env.reward_space.default_negates_returns = True
env.reward_space.default_value = 2.5
env.episode_reward = 10
# Kill the service. Note killing the service for a ManagedConnection will
# result in a ServiceError because we have not ended the session we started
# with env.reset() above. For UnmanagedConnection, this error will not be
# raised.
try:
env.service.close()
except ServiceError as e:
assert "Service exited with returncode " in str(e)
observation, reward, done, _ = env.step(0)
assert done
assert reward == -7.5 # negates reward.
def test_state_serialize_deserialize_equality(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
env.episode_reward = 10
state = env.state
assert state.reward == 10
buf = StringIO()
CompilerEnvStateWriter(buf).write_state(state)
buf.seek(0) # Rewind the buffer for reading.
state_from_csv = next(iter(CompilerEnvStateReader(buf)))
assert state_from_csv.reward == 10
assert state == state_from_csv
def test_apply_state(env: LlvmEnv):
"""Test that apply() on a clean environment produces same state."""
env.reward_space = "IrInstructionCount"
env.reset(benchmark="cbench-v1/crc32")
env.step(env.action_space.flags.index("-mem2reg"))
with gym.make("llvm-v0", reward_space="IrInstructionCount") as other:
other.apply(env.state)
assert other.state == env.state
def test_set_observation_space_from_spec(env: LlvmEnv):
env.observation_space = env.observation.spaces["Autophase"]
obs = env.observation_space
env.observation_space = "Autophase"
assert env.observation_space == obs
def test_set_reward_space_from_spec(env: LlvmEnv):
env.reward_space = env.reward.spaces["IrInstructionCount"]
reward = env.reward_space
env.reward_space = "IrInstructionCount"
assert env.reward_space == reward
def test_same_reward_after_reset(env: LlvmEnv):
"""Check that running the same action after calling reset() produces
same reward.
"""
env.reward_space = "IrInstructionCount"
env.benchmark = "cbench-v1/dijkstra"
action = env.action_space.flags.index("-instcombine")
env.reset()
_, reward_a, _, _ = env.step(action)
assert reward_a, "Sanity check that action produces a reward"
env.reset()
_, reward_b, _, _ = env.step(action)
assert reward_a == reward_b
def test_write_bitcode(env: LlvmEnv, tmpwd: Path):
env.reset(benchmark="cbench-v1/crc32")
env.write_bitcode("file.bc")
assert Path("file.bc").is_file()
def test_write_ir(env: LlvmEnv, tmpwd: Path):
env.reset(benchmark="cbench-v1/crc32")
env.write_bitcode("file.ll")
assert Path("file.ll").is_file()
def test_ir_sha1(env: LlvmEnv, tmpwd: Path):
env.reset(benchmark="cbench-v1/crc32")
before = env.ir_sha1
_, _, done, info = env.step(env.action_space.flags.index("-mem2reg"))
assert not done, info
assert not info["action_had_no_effect"], "sanity check failed, action had no effect"
after = env.ir_sha1
assert before != after
def test_generate_enum_declarations(env: LlvmEnv):
assert issubclass(llvm.observation_spaces, Enum)
assert issubclass(llvm.reward_spaces, Enum)
def test_step_multiple_actions_list(env: LlvmEnv):
"""Pass a list of actions to step()."""
env.reset(benchmark="cbench-v1/crc32")
actions = [
env.action_space.flags.index("-mem2reg"),
env.action_space.flags.index("-reg2mem"),
]
_, _, done, _ = env.multistep(actions)
assert not done
assert env.actions == actions
def test_step_multiple_actions_generator(env: LlvmEnv):
"""Pass an iterable of actions to step()."""
env.reset(benchmark="cbench-v1/crc32")
actions = (
env.action_space.flags.index("-mem2reg"),
env.action_space.flags.index("-reg2mem"),
)
_, _, done, _ = env.multistep(actions)
assert not done
assert env.actions == [
env.action_space.flags.index("-mem2reg"),
env.action_space.flags.index("-reg2mem"),
]
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/llvm_env_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for LLVM benchmark handling."""
import pytest
from compiler_gym.envs import llvm
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.errors import BenchmarkInitError
from compiler_gym.util.runfiles_path import runfiles_path
from tests.pytest_plugins.common import bazel_only
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
INVALID_IR_PATH = runfiles_path("tests/llvm/invalid_ir.ll")
@bazel_only # invalid_ir.ll not installed
def test_reset_invalid_ir(env: LlvmEnv):
"""Test that setting the $CXX to an invalid binary raises an error."""
benchmark = llvm.make_benchmark(INVALID_IR_PATH)
with pytest.raises(BenchmarkInitError, match="Failed to compute .text size cost"):
env.reset(benchmark=benchmark)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/invalid_ir_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for LLVM benchmark handling."""
import re
import subprocess
import tempfile
from pathlib import Path
import gym
import pytest
from compiler_gym.datasets import Benchmark
from compiler_gym.envs import LlvmEnv, llvm
from compiler_gym.errors import BenchmarkInitError
from compiler_gym.service.proto import Benchmark as BenchmarkProto
from compiler_gym.service.proto import File
from compiler_gym.third_party import llvm as llvm_paths
from compiler_gym.util.runfiles_path import runfiles_path
from compiler_gym.util.temporary_working_directory import temporary_working_directory
from tests.pytest_plugins.common import bazel_only
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
# The path of an IR file that assembles but does not compile.
INVALID_IR_PATH = runfiles_path("tests/llvm/invalid_ir.ll")
EXAMPLE_BITCODE_FILE = runfiles_path(
"compiler_gym/third_party/cbench/cbench-v1/crc32.bc"
)
EXAMPLE_BITCODE_IR_INSTRUCTION_COUNT = 242
def test_reset_invalid_benchmark(env: LlvmEnv):
invalid_benchmark = "an invalid benchmark"
with pytest.raises(
LookupError, match=f"Dataset not found: benchmark://{invalid_benchmark}"
):
env.reset(benchmark=invalid_benchmark)
def test_invalid_benchmark_data(env: LlvmEnv):
benchmark = Benchmark.from_file_contents(
"benchmark://new", "Invalid bitcode".encode("utf-8")
)
with pytest.raises(
BenchmarkInitError, match='Failed to parse LLVM bitcode: "benchmark://new"'
):
env.reset(benchmark=benchmark)
def test_invalid_benchmark_missing_file(env: LlvmEnv):
benchmark = Benchmark(
BenchmarkProto(
uri="benchmark://new",
)
)
with pytest.raises(ValueError, match="No program set"):
env.reset(benchmark=benchmark)
def test_benchmark_path_empty_file(env: LlvmEnv):
with tempfile.TemporaryDirectory() as tmpdir:
tmpdir = Path(tmpdir)
(tmpdir / "test.bc").touch()
benchmark = Benchmark.from_file("benchmark://new", tmpdir / "test.bc")
with pytest.raises(BenchmarkInitError, match="Failed to parse LLVM bitcode"):
env.reset(benchmark=benchmark)
def test_invalid_benchmark_path_contents(env: LlvmEnv):
with tempfile.TemporaryDirectory() as tmpdir:
tmpdir = Path(tmpdir)
with open(str(tmpdir / "test.bc"), "w") as f:
f.write("Invalid bitcode")
benchmark = Benchmark.from_file("benchmark://new", tmpdir / "test.bc")
with pytest.raises(BenchmarkInitError, match="Failed to parse LLVM bitcode"):
env.reset(benchmark=benchmark)
def test_benchmark_path_invalid_scheme(env: LlvmEnv):
benchmark = Benchmark(
BenchmarkProto(
uri="benchmark://new", program=File(uri="invalid_scheme://test")
),
)
with pytest.raises(
ValueError,
match=(
"Invalid benchmark data URI. "
'Only the file:/// scheme is supported: "invalid_scheme://test"'
),
):
env.reset(benchmark=benchmark)
def test_custom_benchmark(env: LlvmEnv):
benchmark = Benchmark.from_file("benchmark://new", EXAMPLE_BITCODE_FILE)
env.reset(benchmark=benchmark)
assert env.benchmark == "benchmark://new"
def test_custom_benchmark_constructor():
benchmark = Benchmark.from_file("benchmark://new", EXAMPLE_BITCODE_FILE)
with gym.make("llvm-v0", benchmark=benchmark) as env:
env.reset()
assert env.benchmark == "benchmark://new"
def test_make_benchmark_single_bitcode(env: LlvmEnv):
benchmark = llvm.make_benchmark(EXAMPLE_BITCODE_FILE)
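    # make_benchmark() on a raw bitcode path wraps it in a file-v0 dataset URI
    # that embeds the absolute path of the input file.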
assert benchmark == f"benchmark://file-v0{EXAMPLE_BITCODE_FILE}"
assert benchmark.uri.scheme == "benchmark"
assert benchmark.uri.dataset == "file-v0"
with open(EXAMPLE_BITCODE_FILE, "rb") as f:
contents = f.read()
assert benchmark.proto.program.contents == contents
env.reset(benchmark=benchmark)
assert env.benchmark == benchmark.uri
assert env.observation["IrInstructionCount"] == EXAMPLE_BITCODE_IR_INSTRUCTION_COUNT
@bazel_only
def test_make_benchmark_single_ll():
"""Test passing a single .ll file into make_benchmark()."""
benchmark = llvm.make_benchmark(INVALID_IR_PATH)
assert str(benchmark.uri).startswith("benchmark://user-v0/")
assert benchmark.uri.scheme == "benchmark"
assert benchmark.uri.dataset == "user-v0"
def test_make_benchmark_single_clang_job(env: LlvmEnv):
with tempfile.TemporaryDirectory() as d:
source = Path(d) / "input.c"
with open(str(source), "w") as f:
f.write("int A() { return 0; }")
benchmark = llvm.make_benchmark(str(source))
env.reset(benchmark=benchmark)
assert env.benchmark == benchmark.uri
print(env.observation["Ir"])
assert re.search(r"define (dso_local )?i32 @A\(\)", env.observation["Ir"])
def test_make_benchmark_split_clang_job(env: LlvmEnv):
with tempfile.TemporaryDirectory() as d:
source_1 = Path(d) / "a.c"
source_2 = Path(d) / "b.c"
with open(str(source_1), "w") as f:
f.write("int B() { return A(); }")
with open(str(source_2), "w") as f:
f.write("int A() { return 0; }")
benchmark = llvm.make_benchmark(
[
str(source_1),
str(source_2),
]
)
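        # Passing multiple source files produces a single benchmark whose
        # translation units are linked together into one module.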
env.reset(benchmark=benchmark)
assert env.benchmark == benchmark.uri
print(env.observation["Ir"])
assert re.search(r"define (dso_local )?i32 @A\(\)", env.observation["Ir"])
assert re.search(r"define (dso_local )?i32 @B\(\)", env.observation["Ir"])
def test_make_benchmark_single_clang_invocation_multiple_inputs():
with tempfile.TemporaryDirectory() as d:
source_1 = Path(d) / "a.c"
source_2 = Path(d) / "b.c"
with open(str(source_1), "w") as f:
f.write("int B() { return A(); }")
with open(str(source_2), "w") as f:
f.write("int A() { return 0; }")
# cannot specify -o when generating multiple output files
with pytest.raises(OSError):
llvm.make_benchmark(llvm.ClangInvocation([str(source_1), str(source_2)]))
def test_make_benchmark_undefined_symbol(env: LlvmEnv):
with tempfile.TemporaryDirectory() as d:
source = Path(d) / "a.c"
with open(str(source), "w") as f:
f.write("int main() { return A(); }")
benchmark = llvm.make_benchmark(source)
env.reset(benchmark=benchmark)
assert env.benchmark == benchmark.uri
print(env.observation["Ir"])
assert re.search(r"declare (dso_local )?i32 @A\(\.\.\.\)", env.observation["Ir"])
def test_make_benchmark_missing_file():
with tempfile.TemporaryDirectory() as d:
with pytest.raises(FileNotFoundError):
llvm.make_benchmark(Path(d) / "a.c")
with pytest.raises(FileNotFoundError):
llvm.make_benchmark(str(Path(d) / "a.c"))
def test_make_benchmark_unrecognized_file_type():
with tempfile.TemporaryDirectory() as d:
path = Path(d) / "foo.txt"
path.touch()
with pytest.raises(ValueError, match=r"Unrecognized file type"):
llvm.make_benchmark(path)
def test_make_benchmark_clang_job_standard_libraries(env: LlvmEnv):
with tempfile.TemporaryDirectory() as d:
source = Path(d) / "input.cc"
with open(str(source), "w") as f:
f.write('#include <stdio.h>\nint A() { printf(""); return 0; }')
benchmark = llvm.make_benchmark(str(source))
env.reset(benchmark=benchmark)
assert env.benchmark == benchmark.uri
print(env.observation["Ir"])
assert re.search(r"define (dso_local )?i32 @_Z1Av\(\)", env.observation["Ir"])
assert re.search(r"declare (dso_local )?i32 @printf", env.observation["Ir"])
def test_make_benchmark_invalid_clang_job():
with pytest.raises(OSError, match="Compilation job failed with returncode"):
llvm.make_benchmark(llvm.ClangInvocation(["-invalid-arg"]))
def test_custom_benchmark_is_added_on_service_restart(env: LlvmEnv):
# When the service is restarted, the environment still uses the same custom
# benchmark.
with tempfile.TemporaryDirectory() as d:
source = Path(d) / "a.c"
with open(str(source), "w") as f:
f.write("int main() { return 0; }")
benchmark = llvm.make_benchmark(source)
env.reset(benchmark=benchmark)
assert env.benchmark == benchmark.uri
# Kill the service so that the next call to reset() starts a new one.
env.close()
assert env.service is None
env.reset()
assert env.benchmark == benchmark.uri
def test_two_custom_benchmarks_reset(env: LlvmEnv):
with tempfile.TemporaryDirectory() as d:
source = Path(d) / "a.c"
with open(str(source), "w") as f:
f.write("int main() { return 0; }")
benchmark1 = llvm.make_benchmark(source)
benchmark2 = llvm.make_benchmark(source)
assert benchmark1.uri != benchmark2.uri
env.reset(benchmark=benchmark1)
assert env.benchmark == benchmark1.uri
env.reset()
assert env.benchmark == benchmark1.uri
with pytest.warns(
UserWarning,
match=r"Changing the benchmark has no effect until reset\(\) is called",
):
env.benchmark = benchmark2
env.reset()
assert env.benchmark == benchmark2.uri
def test_failing_build_cmd(env: LlvmEnv, tmpdir):
"""Test that reset() raises an error if build command fails."""
(Path(tmpdir) / "program.c").touch()
benchmark = env.make_benchmark(Path(tmpdir) / "program.c")
benchmark.proto.dynamic_config.build_cmd.argument.extend(
["$CC", "$IN", "-invalid-cc-argument"]
)
benchmark.proto.dynamic_config.build_cmd.timeout_seconds = 10
with pytest.raises(
BenchmarkInitError,
match=r"clang: error: unknown argument: '-invalid-cc-argument'",
):
env.reset(benchmark=benchmark)
def test_make_benchmark_from_command_line_empty_input(env: LlvmEnv):
with pytest.raises(ValueError, match="Input command line is empty"):
env.make_benchmark_from_command_line("")
with pytest.raises(ValueError, match="Input command line is empty"):
env.make_benchmark_from_command_line([])
@pytest.mark.parametrize("cmd", ["gcc", ["gcc"]])
def test_make_benchmark_from_command_line_insufficient_args(env: LlvmEnv, cmd):
with pytest.raises(ValueError, match="Input command line 'gcc' is too short"):
env.make_benchmark_from_command_line(cmd)
@pytest.mark.parametrize("cmd", ["gcc in.c -o foo", ["gcc", "in.c", "-o", "foo"]])
def test_make_benchmark_from_command_line_build_cmd(env: LlvmEnv, cmd):
with temporary_working_directory() as cwd:
with open("in.c", "w") as f:
f.write("int main() { return 0; }")
bm = env.make_benchmark_from_command_line(cmd, system_includes=False)
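        # The benchmark's build command swaps the original 'gcc' driver for
        # CompilerGym's clang, compiling the post-optimization IR ($IN) back to
        # the original output path.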
assert bm.proto.dynamic_config.build_cmd.argument[:4] == [
str(llvm_paths.clang_path()),
"-xir",
"$IN",
"-o",
]
assert bm.proto.dynamic_config.build_cmd.argument[-1].endswith(f"{cwd}/foo")
@pytest.mark.parametrize("cmd", ["gcc in.c -o foo", ["gcc", "in.c", "-o", "foo"]])
def test_make_benchmark_from_command_line(env: LlvmEnv, cmd):
with temporary_working_directory() as cwd:
with open("in.c", "w") as f:
f.write("int main() { return 0; }")
bm = env.make_benchmark_from_command_line(cmd)
assert not (cwd / "foo").is_file()
env.reset(benchmark=bm)
assert "main()" in env.ir
assert (cwd / "foo").is_file()
(cwd / "foo").unlink()
bm.compile(env)
assert (cwd / "foo").is_file()
def test_make_benchmark_from_command_line_no_system_includes(env: LlvmEnv):
with temporary_working_directory():
with open("in.c", "w") as f:
f.write(
"""
#include <stdio.h>
int main() { return 0; }
"""
)
with pytest.raises(BenchmarkInitError, match="stdio.h"):
env.make_benchmark_from_command_line("gcc in.c", system_includes=False)
def test_make_benchmark_from_command_line_system_includes(env: LlvmEnv):
with temporary_working_directory():
with open("in.c", "w") as f:
f.write(
"""
#include <stdio.h>
int main() { return 0; }
"""
)
env.make_benchmark_from_command_line("gcc in.c")
def test_make_benchmark_from_command_line_stdin(env: LlvmEnv):
with pytest.raises(ValueError, match="Input command line reads from stdin"):
env.make_benchmark_from_command_line(["gcc", "-xc", "-"])
@pytest.mark.parametrize("retcode", [1, 5])
def test_make_benchmark_from_command_line_multiple_input_sources(
env: LlvmEnv, retcode: int
):
"""Test that command lines with multiple source files are linked together."""
with temporary_working_directory() as cwd:
with open("a.c", "w") as f:
f.write("int main() { return B(); }")
with open("b.c", "w") as f:
f.write(f"int B() {{ return {retcode}; }}")
bm = env.make_benchmark_from_command_line(["gcc", "a.c", "b.c", "-o", "foo"])
assert not (cwd / "foo").is_file()
env.reset(benchmark=bm)
assert "main()" in env.ir
bm.compile(env)
assert (cwd / "foo").is_file()
p = subprocess.Popen(["./foo"])
p.communicate(timeout=60)
assert p.returncode == retcode
@pytest.mark.parametrize("retcode", [1, 5])
def test_make_benchmark_from_command_line_mixed_source_and_object_files(
env: LlvmEnv, retcode: int
):
"""Test a command line that contains both source files and precompiled
object files. The object files should be filtered from compilation but
used for the final link.
"""
with temporary_working_directory():
with open("a.c", "w") as f:
f.write(
"""
#include "b.h"
int A() {
return B();
}
int main() {
return A();
}
"""
)
with open("b.c", "w") as f:
f.write(f"int B() {{ return {retcode}; }}")
with open("b.h", "w") as f:
f.write("int B();")
# Compile b.c to object file:
subprocess.check_call([str(llvm_paths.clang_path()), "b.c", "-c"], timeout=60)
assert (Path("b.o")).is_file()
bm = env.make_benchmark_from_command_line(["gcc", "a.c", "b.o", "-o", "foo"])
env.reset(benchmark=bm)
bm.compile(env)
assert Path("foo").is_file()
p = subprocess.Popen(["./foo"])
p.communicate(timeout=60)
assert p.returncode == retcode
def test_make_benchmark_from_command_line_only_object_files(env: LlvmEnv):
with temporary_working_directory():
with open("a.c", "w") as f:
f.write("int A() { return 5; }")
# Compile b.c to object file:
subprocess.check_call([str(llvm_paths.clang_path()), "a.c", "-c"], timeout=60)
assert (Path("a.o")).is_file()
with pytest.raises(
ValueError, match="Input command line has no source file inputs"
):
env.make_benchmark_from_command_line(["gcc", "a.o", "-c"])
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/custom_benchmarks_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Regression tests for LlvmEnv.validate()."""
from io import StringIO
import pytest
from compiler_gym import CompilerEnvStateReader
from compiler_gym.envs import LlvmEnv
from tests.pytest_plugins.common import skip_on_ci
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
# The maximum number of times to call validate() on a state to check for an
# error.
VALIDATION_FLAKINESS = 3
# A list of CSV states that should pass validation, to be used as regression
# tests.
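# Each line is assumed to follow the CompilerEnvState CSV layout of
# benchmark, reward, walltime, commandline; reward and walltime are left
# empty for these states.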
REGRESSION_TEST_STATES = """\
benchmark://cbench-v1/rijndael,,,opt -gvn -loop-unroll -instcombine -gvn -loop-unroll -instcombine input.bc -o output.bc
benchmark://cbench-v1/rijndael,,,opt -gvn -loop-unroll -mem2reg -loop-rotate -gvn -loop-unroll -mem2reg -loop-rotate input.bc -o output.bc
benchmark://cbench-v1/rijndael,,,opt -gvn-hoist input.bc -o output.bc
benchmark://cbench-v1/rijndael,,,opt -jump-threading -sink -partial-inliner -mem2reg -inline -jump-threading -sink -partial-inliner -mem2reg -inline input.bc -o output.bc
benchmark://cbench-v1/rijndael,,,opt -mem2reg -indvars -loop-unroll -simplifycfg -mem2reg -indvars -loop-unroll -simplifycfg input.bc -o output.bc
benchmark://cbench-v1/rijndael,,,opt -mem2reg -instcombine -early-cse-memssa -loop-unroll input.bc -o output.bc
benchmark://cbench-v1/rijndael,,,opt -reg2mem -licm -reg2mem -licm -reg2mem -licm input.bc -o output.bc
benchmark://cbench-v1/rijndael,,,opt -sroa -simplifycfg -partial-inliner input.bc -o output.bc
"""
REGRESSION_TEST_STATES = list(CompilerEnvStateReader(StringIO(REGRESSION_TEST_STATES)))
REGRESSION_TEST_STATE_NAMES = [
f"{s.benchmark},{s.commandline}" for s in REGRESSION_TEST_STATES
]
# A list of CSV states that are known to fail validation.
KNOWN_BAD_STATES = """\
benchmark://cbench-v1/susan,0.40581008446378297,6.591785192489624,opt -mem2reg -reg2mem -gvn -reg2mem -gvn -newgvn input.bc -o output.bc
"""
KNOWN_BAD_STATES = list(CompilerEnvStateReader(StringIO(KNOWN_BAD_STATES)))
KNOWN_BAD_STATE_NAMES = [f"{s.benchmark},{s.commandline}" for s in KNOWN_BAD_STATES]
#
# NOTE(github.com/facebookresearch/CompilerGym/issues/103): The following
# regression tests are deprecated after -structurizecfg was deactivated:
#
# benchmark://cbench-v1/tiff2bw,,,opt -structurizecfg input.bc -o output.bc
# benchmark://cbench-v1/tiff2rgba,,,opt -structurizecfg input.bc -o output.bc
# benchmark://cbench-v1/tiffdither,,,opt -structurizecfg input.bc -o output.bc
# benchmark://cbench-v1/tiffmedian,,,opt -structurizecfg input.bc -o output.bc
# benchmark://cBench-v0/susan,-0.5352209944751382,1.849454402923584,opt -structurizecfg -loop-extract -mergereturn -structurizecfg -loop-extract -mergereturn input.bc -o output.bc
# benchmark://cBench-v0/susan,0.9802486187845304,1.7552905082702637,opt -mem2reg -simplifycfg -lcssa -break-crit-edges -newgvn -mem2reg -simplifycfg -lcssa -break-crit-edges -newgvn input.bc -o output.bc
@skip_on_ci
@pytest.mark.parametrize("state", KNOWN_BAD_STATES, ids=KNOWN_BAD_STATE_NAMES)
def test_validate_known_bad_trajectory(env: LlvmEnv, state):
env.apply(state)
for _ in range(VALIDATION_FLAKINESS):
result = env.validate()
if result.okay():
pytest.fail("Validation succeeded on state where it should have failed")
@skip_on_ci
@pytest.mark.parametrize(
"state", REGRESSION_TEST_STATES, ids=REGRESSION_TEST_STATE_NAMES
)
def test_validate_known_good_trajectory(env: LlvmEnv, state):
env.apply(state)
for _ in range(VALIDATION_FLAKINESS):
result = env.validate()
if not result.okay():
pytest.fail(f"Validation failed: {result}\n{result.dict()}")
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/validation_regression_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
import gym
import numpy as np
import pytest
import compiler_gym # noqa Register environments.
from compiler_gym.envs import llvm
from compiler_gym.envs.llvm.llvm_env import LlvmEnv
from compiler_gym.errors import ServiceError
from compiler_gym.service.client_service_compiler_env import ClientServiceCompilerEnv
from compiler_gym.service.connection import CompilerGymServiceConnection
from compiler_gym.third_party.autophase import AUTOPHASE_FEATURE_DIM
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
@pytest.fixture(scope="function", params=["local", "service"])
def env(request) -> ClientServiceCompilerEnv:
# Redefine fixture to test both gym.make(...) and unmanaged service
# connections.
if request.param == "local":
with gym.make("llvm-v0") as env:
yield env
else:
service = CompilerGymServiceConnection(llvm.LLVM_SERVICE_BINARY)
try:
with LlvmEnv(service=service.connection.url) as env:
yield env
finally:
service.close()
def test_service_env_dies_reset(env: ClientServiceCompilerEnv):
env.observation_space = "Autophase"
env.reward_space = "IrInstructionCount"
env.reset("cbench-v1/crc32")
# Kill the service. Note killing the service for a ManagedConnection will
# result in a ServiceError because we have not ended the session we started
# with env.reset() above. For UnmanagedConnection, this error will not be
# raised.
try:
env.service.close()
except ServiceError as e:
assert "Service exited with returncode " in str(e)
# Check that the environment doesn't fall over.
observation, reward, done, info = env.step(0)
assert done, info["error_details"]
assert not env.in_episode
# Check that default values are returned.
np.testing.assert_array_equal(observation, np.zeros(AUTOPHASE_FEATURE_DIM))
assert reward == 0
# Reset the environment and check that it works.
env.reset(benchmark="cbench-v1/crc32")
assert env.in_episode
observation, reward, done, info = env.step(0)
assert not done, info["error_details"]
assert observation is not None
assert reward is not None
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/service_connection_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for LlvmEnv.episode_reward."""
from compiler_gym.envs import LlvmEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_episode_reward_init_zero(env: LlvmEnv):
env.reward_space = "IrInstructionCount"
env.reset("cbench-v1/crc32")
assert env.episode_reward == 0
_, reward, _, _ = env.step(env.action_space["-mem2reg"])
assert reward > 0
assert env.episode_reward == reward
env.reset()
assert env.episode_reward == 0
def test_episode_reward_with_non_default_reward_space(env: LlvmEnv):
"""Test that episode_reward is not updated when custom rewards passed to
step()."""
env.reward_space = "IrInstructionCountOz"
env.reset("cbench-v1/crc32")
assert env.episode_reward == 0
_, rewards, _, _ = env.step(
env.action_space["-mem2reg"],
reward_spaces=["IrInstructionCount"],
)
assert rewards[0] > 0
assert env.episode_reward == 0
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/episode_reward_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
import os
import sys
from typing import Any, Dict, List, NamedTuple
import gym
import networkx as nx
import numpy as np
import pytest
from flaky import flaky
from compiler_gym.envs.llvm.llvm_env import LlvmEnv
from compiler_gym.spaces import Box
from compiler_gym.spaces import Dict as DictSpace
from compiler_gym.spaces import Scalar, Sequence
from tests.pytest_plugins.common import ci_only
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_default_observation_space(env: LlvmEnv):
env.observation_space = "Autophase"
assert env.observation_space.shape == (56,)
assert env.observation_space_spec.id == "Autophase"
env.observation_space = None
assert env.observation_space is None
assert env.observation_space_spec is None
invalid = "invalid value"
with pytest.raises(LookupError, match=f"Observation space not found: {invalid}"):
env.observation_space = invalid
def test_observation_spaces(env: LlvmEnv):
env.reset("cbench-v1/crc32")
assert set(env.observation.spaces.keys()) == {
"Autophase",
"AutophaseDict",
"Bitcode",
"BitcodeFile",
"Buildtime",
"CpuInfo",
"Inst2vec",
"Inst2vecEmbeddingIndices",
"Inst2vecPreprocessedText",
"InstCount",
"InstCountDict",
"InstCountNorm",
"InstCountNormDict",
"Ir",
"IrInstructionCount",
"IrInstructionCountO0",
"IrInstructionCountO3",
"IrInstructionCountOz",
"IrSha1",
"IsBuildable",
"IsRunnable",
"LexedIr",
"LexedIrTuple",
"ObjectTextSizeBytes",
"ObjectTextSizeO0",
"ObjectTextSizeO3",
"ObjectTextSizeOz",
"Programl",
"ProgramlJson",
"Runtime",
"TextSizeBytes",
"TextSizeO0",
"TextSizeO3",
"TextSizeOz",
}
def test_ir_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "Ir"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
assert space.space.dtype == str
assert space.space.size_range == (0, np.iinfo(np.int64).max)
value: str = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, str)
assert space.space.contains(value)
assert space.deterministic
assert not space.platform_dependent
def test_ir_sha1_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "IrSha1"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
assert space.space.dtype == str
assert space.space.size_range == (40, 40)
value: str = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, str)
assert len(value) == 40
assert space.space.contains(value)
assert space.deterministic
assert not space.platform_dependent
def test_bitcode_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "Bitcode"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
assert space.space.dtype == np.int8
assert space.space.size_range == (0, np.iinfo(np.int64).max)
assert space.deterministic
assert not space.platform_dependent
value: str = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, np.ndarray)
assert value.dtype == np.int8
assert space.space.contains(value)
def test_bitcode_file_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "BitcodeFile"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
assert space.space.dtype == str
assert space.space.size_range == (0, 4096)
assert not space.deterministic
assert not space.platform_dependent
value: str = env.observation[key]
print(value) # For debugging in case of error.
try:
assert isinstance(value, str)
assert os.path.isfile(value)
assert space.space.contains(value)
finally:
os.unlink(value)
@pytest.mark.parametrize(
"benchmark_uri", ["cbench-v1/crc32", "cbench-v1/qsort", "cbench-v1/gsm"]
)
def test_bitcode_file_equivalence(env: LlvmEnv, benchmark_uri: str):
"""Test that LLVM produces the same bitcode as a file and as a byte array."""
env.reset(benchmark=benchmark_uri)
bitcode = env.observation.Bitcode()
bitcode_file = env.observation.BitcodeFile()
try:
with open(bitcode_file, "rb") as f:
bitcode_from_file = f.read()
assert bitcode.tobytes() == bitcode_from_file
finally:
os.unlink(bitcode_file)
# The Autophase feature vector for benchmark://cbench-v1/crc32 in its initial
# state.
AUTOPHASE_CBENCH_CRC32 = [
0,
0,
16,
12,
2,
16,
8,
2,
4,
8,
0,
0,
0,
29,
0,
24,
9,
2,
32,
44,
41,
14,
36,
16,
13,
0,
5,
26,
3,
5,
24,
20,
24,
33,
5,
10,
3,
51,
0,
1,
0,
5,
0,
0,
0,
42,
0,
1,
8,
5,
29,
242,
157,
15,
0,
103,
]
def test_autophase_observation_space_reset(env: LlvmEnv):
"""Test that the intial observation is returned on env.reset()."""
env.observation_space = "Autophase"
observation = env.reset("cbench-v1/crc32")
print(observation.tolist()) # For debugging on error.
np.testing.assert_array_equal(observation, AUTOPHASE_CBENCH_CRC32)
def test_instcount_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "InstCount"
space = env.observation.spaces[key]
assert isinstance(space.space, Box)
assert space.space.dtype == np.int64
assert space.space.shape == (70,)
assert space.deterministic
assert not space.platform_dependent
value: np.ndarray = env.observation[key]
print(value.tolist()) # For debugging in case of error.
expected_values = [
242,
29,
15,
5,
24,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
5,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
3,
0,
3,
1,
8,
26,
51,
42,
5,
0,
0,
0,
1,
5,
0,
0,
0,
0,
0,
0,
0,
0,
0,
20,
0,
0,
0,
10,
0,
0,
33,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
]
np.testing.assert_array_equal(value, expected_values)
assert value.dtype == np.int64
    # The first value is the total number of instructions. It should equal the
    # sum of the per-opcode instruction counts, i.e. all features after the
    # three aggregate counts.
assert sum(value[3:]) == value[0]
def test_instcount_dict_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "InstCountDict"
space = env.observation.spaces[key]
assert isinstance(space.space, DictSpace)
assert space.deterministic
assert not space.platform_dependent
value: Dict[str, int] = env.observation[key]
print(value) # For debugging in case of error.
assert len(value) == 70
def test_instcount_norm_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "InstCountNorm"
space = env.observation.spaces[key]
assert isinstance(space.space, Box)
assert space.space.dtype == np.float32
assert space.space.shape == (69,)
assert space.deterministic
assert not space.platform_dependent
value: np.ndarray = env.observation[key]
print(value.tolist()) # For debugging in case of error.
assert value.shape == (69,)
assert value.dtype == np.float32
    # Check that the normalized instruction counts sum to 1. Note that the
    # first two features (#blocks and #funcs) must be excluded.
    assert sum(value[2:]) == pytest.approx(1.0)
def test_instcount_norm_dict_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "InstCountNormDict"
space = env.observation.spaces[key]
assert isinstance(space.space, DictSpace)
assert space.deterministic
assert not space.platform_dependent
value: Dict[str, int] = env.observation[key]
print(value) # For debugging in case of error.
assert len(value) == 69
def test_autophase_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "Autophase"
space = env.observation.spaces[key]
assert isinstance(space.space, Box)
value: np.ndarray = env.observation[key]
print(value.tolist()) # For debugging in case of error.
assert isinstance(value, np.ndarray)
assert value.shape == (56,)
assert space.deterministic
assert not space.platform_dependent
np.testing.assert_array_equal(value, AUTOPHASE_CBENCH_CRC32)
assert space.space.contains(value)
def test_autophase_dict_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "AutophaseDict"
space = env.observation.spaces[key]
assert isinstance(space.space, DictSpace)
value: Dict[str, int] = env.observation[key]
print(value) # For debugging in case of error.
assert len(value) == 56
assert space.deterministic
assert not space.platform_dependent
def test_lexed_ir_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "LexedIr"
space = env.observation.spaces[key]
print(type(space.space))
print(space.space)
assert isinstance(space.space, Sequence)
value: Dict[str, np.array] = env.observation[key]
print(value) # For debugging in case of error
assert len(value) == 4
assert space.deterministic
assert not space.platform_dependent
def test_lexed_ir_tuple_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "LexedIrTuple"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
value: List[NamedTuple] = env.observation[key]
print(value) # For debugging in case of error
assert space.deterministic
assert not space.platform_dependent
def test_programl_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "Programl"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
graph: nx.MultiDiGraph = env.observation[key]
assert isinstance(graph, nx.MultiDiGraph)
assert graph.number_of_nodes() == 512
assert graph.number_of_edges() == 907
assert graph.nodes[0] == {
"block": 0,
"function": 0,
"text": "[external]",
"type": 0,
}
assert space.deterministic
assert not space.platform_dependent
def test_programl_json_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "ProgramlJson"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
graph: Dict[str, Any] = env.observation[key]
assert isinstance(graph, dict)
def test_cpuinfo_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "CpuInfo"
space = env.observation.spaces[key]
assert isinstance(space.space, DictSpace)
value: Dict[str, Any] = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, dict)
# Test each expected key, removing it as we go.
assert isinstance(value.pop("name"), str)
assert isinstance(value.pop("cores_count"), int)
assert isinstance(value.pop("l1i_cache_size"), int)
assert isinstance(value.pop("l1i_cache_count"), int)
assert isinstance(value.pop("l1d_cache_size"), int)
assert isinstance(value.pop("l1d_cache_count"), int)
assert isinstance(value.pop("l2_cache_size"), int)
assert isinstance(value.pop("l2_cache_count"), int)
assert isinstance(value.pop("l3_cache_size"), int)
assert isinstance(value.pop("l3_cache_count"), int)
assert isinstance(value.pop("l4_cache_size"), int)
assert isinstance(value.pop("l4_cache_count"), int)
# Anything left in the JSON dictionary now is an unexpected key.
assert not value
invalid = "invalid value"
with pytest.raises(KeyError) as ctx:
_ = env.observation[invalid]
assert str(ctx.value) == f"'{invalid}'"
assert space.deterministic
assert space.platform_dependent
@pytest.fixture
def cbench_crc32_inst2vec_embedding_indices() -> List[int]:
"""The expected inst2vec embedding indices for cbench-v1/crc32."""
# The linux/macOS builds of clang produce slightly different bitcodes.
if sys.platform.lower().startswith("linux"):
return [
8564,
8564,
8564,
8564,
5,
46,
46,
40,
8564,
13,
8,
8564,
1348,
178,
286,
214,
182,
235,
697,
1513,
192,
8564,
182,
182,
395,
1513,
2298,
8564,
289,
291,
3729,
3729,
8564,
178,
289,
289,
200,
1412,
1412,
8564,
3032,
180,
3032,
293,
3032,
205,
415,
205,
213,
8564,
8564,
8564,
204,
8564,
213,
215,
364,
364,
216,
8564,
216,
8564,
8564,
8564,
311,
634,
204,
8564,
415,
182,
640,
214,
182,
295,
675,
697,
1513,
192,
8564,
182,
182,
395,
1513,
214,
216,
8564,
8564,
8564,
634,
204,
8564,
213,
215,
415,
205,
216,
8564,
8564,
8564,
182,
961,
214,
415,
214,
364,
364,
216,
8564,
293,
3032,
180,
3032,
8564,
3032,
295,
257,
8564,
291,
178,
178,
200,
214,
180,
3032,
205,
216,
8564,
182,
977,
204,
8564,
182,
213,
235,
697,
1513,
192,
8564,
182,
182,
395,
1513,
214,
216,
8564,
182,
420,
214,
213,
8564,
200,
216,
8564,
182,
961,
180,
3032,
2298,
8564,
289,
8564,
289,
178,
178,
289,
311,
594,
311,
180,
3032,
180,
3032,
293,
3032,
364,
216,
8564,
295,
431,
311,
425,
204,
8564,
597,
8564,
594,
213,
8564,
295,
653,
311,
295,
634,
204,
8564,
182,
182,
597,
213,
8564,
216,
8564,
216,
8564,
295,
634,
612,
293,
3032,
180,
3032,
180,
3032,
257,
8564,
289,
289,
8564,
8564,
178,
178,
289,
364,
311,
594,
8564,
3032,
8564,
180,
3032,
180,
3032,
8564,
8564,
8564,
204,
8564,
8564,
8564,
364,
364,
216,
8564,
8564,
8564,
8564,
8564,
205,
216,
8564,
182,
182,
488,
204,
8564,
295,
597,
182,
640,
182,
540,
612,
8564,
216,
8564,
182,
640,
214,
216,
8564,
364,
364,
216,
8564,
180,
3032,
180,
3032,
8564,
3032,
295,
257,
]
elif sys.platform.lower().startswith("darwin"):
return [
8564,
8564,
5,
46,
46,
40,
8564,
13,
8,
8564,
1348,
178,
286,
214,
182,
235,
697,
1513,
192,
8564,
182,
182,
395,
1513,
2298,
8564,
289,
291,
3729,
3729,
8564,
178,
289,
289,
200,
1412,
1412,
8564,
3032,
180,
3032,
293,
3032,
205,
415,
205,
213,
8564,
8564,
5666,
204,
8564,
213,
215,
364,
364,
216,
8564,
216,
8564,
5665,
8564,
311,
634,
204,
8564,
415,
182,
640,
214,
182,
295,
675,
697,
1513,
192,
8564,
182,
182,
395,
1513,
214,
216,
8564,
5665,
8564,
634,
204,
8564,
213,
215,
415,
205,
216,
8564,
5665,
8564,
182,
961,
214,
415,
214,
364,
364,
216,
8564,
293,
3032,
180,
3032,
8564,
3032,
295,
257,
8564,
291,
178,
178,
200,
214,
180,
3032,
205,
216,
8564,
182,
977,
204,
8564,
182,
213,
235,
697,
1513,
192,
8564,
182,
182,
395,
1513,
214,
216,
8564,
182,
420,
214,
213,
8564,
200,
216,
8564,
182,
961,
180,
3032,
2298,
8564,
289,
8564,
289,
178,
178,
289,
311,
594,
311,
180,
3032,
180,
3032,
293,
3032,
364,
216,
8564,
295,
431,
311,
425,
204,
8564,
597,
8564,
594,
213,
8564,
295,
653,
311,
295,
634,
204,
8564,
182,
182,
597,
213,
8564,
216,
8564,
216,
8564,
295,
634,
612,
293,
3032,
180,
3032,
180,
3032,
257,
8564,
289,
289,
8564,
8564,
178,
178,
289,
364,
311,
594,
8564,
3032,
8564,
180,
3032,
180,
3032,
8564,
8564,
5666,
204,
8564,
5391,
8564,
364,
364,
216,
8564,
5665,
8564,
5665,
8564,
205,
216,
8564,
182,
182,
488,
204,
8564,
295,
597,
182,
640,
182,
540,
612,
8564,
216,
8564,
182,
640,
214,
216,
8564,
364,
364,
216,
8564,
180,
3032,
180,
3032,
8564,
3032,
295,
257,
]
else:
raise NotImplementedError(f"Unknown platform: {sys.platform}")
def test_inst2vec_preprocessed_observation_space(
env: LlvmEnv, cbench_crc32_inst2vec_embedding_indices: List[int]
):
env.reset("cbench-v1/crc32")
key = "Inst2vecPreprocessedText"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
value: List[str] = env.observation[key]
assert isinstance(value, list)
for item, idx in zip(value, cbench_crc32_inst2vec_embedding_indices):
assert isinstance(item, str)
unk = env.inst2vec.vocab["!UNK"]
indices = [env.inst2vec.vocab.get(item, unk) for item in value]
print(indices) # For debugging in case of error.
assert indices == cbench_crc32_inst2vec_embedding_indices
assert space.deterministic
assert not space.platform_dependent
def test_inst2vec_embedding_indices_observation_space(
env: LlvmEnv, cbench_crc32_inst2vec_embedding_indices: List[int]
):
env.reset("cbench-v1/crc32")
key = "Inst2vecEmbeddingIndices"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
value: List[int] = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, list)
for item in value:
assert isinstance(item, int)
assert value == cbench_crc32_inst2vec_embedding_indices
assert space.deterministic
assert not space.platform_dependent
def test_inst2vec_observation_space(
env: LlvmEnv, cbench_crc32_inst2vec_embedding_indices: List[int]
):
env.reset("cbench-v1/crc32")
key = "Inst2vec"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
value: np.ndarray = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, np.ndarray)
assert value.dtype == np.float32
height, width = value.shape
assert width == len(env.inst2vec.embeddings[0])
assert height == len(cbench_crc32_inst2vec_embedding_indices)
# Check a handful of values.
np.testing.assert_array_almost_equal(
value.tolist(),
[
env.inst2vec.embeddings[idx]
for idx in cbench_crc32_inst2vec_embedding_indices
],
)
assert space.deterministic
assert not space.platform_dependent
def test_ir_instruction_count_observation_spaces(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "IrInstructionCount"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert not space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 242
key = "IrInstructionCountO0"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert not space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 242
key = "IrInstructionCountO3"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert not space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 164
key = "IrInstructionCountOz"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert not space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 114
def test_object_text_size_observation_spaces(env: LlvmEnv):
env.reset("cbench-v1/crc32")
# Expected .text sizes for this benchmark: -O0, -O3, -Oz.
crc32_code_sizes = {"darwin": [1171, 3825, 3289], "linux": [1183, 3961, 3286]}
actual_code_sizes = []
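    # ObjectTextSizeBytes at reset equals the unoptimized (-O0) size, so it is
    # collected in place of ObjectTextSizeO0 below.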
key = "ObjectTextSizeBytes"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
assert isinstance(value, int)
actual_code_sizes.append(value)
key = "ObjectTextSizeO0"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
assert isinstance(value, int)
key = "ObjectTextSizeO3"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
assert isinstance(value, int)
actual_code_sizes.append(value)
key = "ObjectTextSizeOz"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
assert isinstance(value, int)
actual_code_sizes.append(value)
# For debugging in case of error:
print("Expected code sizes:", crc32_code_sizes[sys.platform])
print("Actual code sizes:", actual_code_sizes)
assert crc32_code_sizes[sys.platform] == actual_code_sizes
def test_text_size_observation_spaces(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "TextSizeBytes"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
key = "TextSizeO0"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value > 0 # Exact value is system dependent, see below.
key = "TextSizeO3"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value > 0 # Exact value is system dependent, see below.
key = "TextSizeOz"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value > 0 # Exact value is system dependent, see below.
# NOTE(cummins): The exact values here depend on the system toolchain and
# libraries, so only run this test on the GitHub CI runner environment where we
# can hardcode the values. If this test starts to fail, it may be because the CI
# runner environment has changed.
@ci_only
def test_text_size_observation_space_values(env: LlvmEnv):
env.reset("cbench-v1/crc32")
# Expected .text sizes for this benchmark: -O0, -O3, -Oz.
crc32_code_sizes = {"darwin": [16384, 16384, 16384], "linux": [2850, 5652, 4980]}
# For debugging in case of error.
print(env.observation["TextSizeO0"])
print(env.observation["TextSizeO3"])
print(env.observation["TextSizeOz"])
assert env.observation.TextSizeO0() == crc32_code_sizes[sys.platform][0]
assert env.observation.TextSizeO3() == crc32_code_sizes[sys.platform][1]
assert env.observation.TextSizeOz() == crc32_code_sizes[sys.platform][2]
@flaky # Runtimes can timeout
def test_runtime_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "Runtime"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
value: np.ndarray = env.observation[key]
print(value.tolist()) # For debugging in case of error.
assert isinstance(value, np.ndarray)
assert env.runtime_observation_count == 1
assert value.shape == (1,)
assert not space.deterministic
assert space.platform_dependent
assert space.space.contains(value)
for buildtime in value:
assert buildtime > 0
@flaky # Runtimes can timeout
def test_runtime_observation_space_different_observation_count(env: LlvmEnv):
"""Test setting a custom observation count for LLVM runtimes."""
env.reset("cbench-v1/crc32")
env.runtime_observation_count = 3
value: np.ndarray = env.observation["Runtime"]
print(value.tolist()) # For debugging in case of error.
assert value.shape == (3,)
env.reset()
value: np.ndarray = env.observation["Runtime"]
print(value.tolist()) # For debugging in case of error.
assert value.shape == (3,)
env.runtime_observation_count = 5
value: np.ndarray = env.observation["Runtime"]
print(value.tolist()) # For debugging in case of error.
assert value.shape == (5,)
@flaky # Runtimes can timeout
def test_runtime_observation_space_invalid_observation_count(env: LlvmEnv):
"""Test setting an invalid custom observation count for LLVM runtimes."""
env.reset("cbench-v1/crc32")
val = env.runtime_observation_count
with pytest.raises(
ValueError, match="runtimes_per_observation_count must be >= 1. Received: -5"
):
env.runtime_observation_count = -5
assert env.runtime_observation_count == val # unchanged
def test_runtime_observation_space_not_runnable(env: LlvmEnv):
env.reset("chstone-v0/gsm")
key = "Runtime"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
assert env.observation[key] is None
@flaky # Build can timeout
def test_buildtime_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "Buildtime"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
assert not space.deterministic
assert space.platform_dependent
value: np.ndarray = env.observation[key]
print(value) # For debugging in case of error.
assert value.shape == (1,)
assert space.space.contains(value)
assert value[0] >= 0
def test_buildtime_observation_space_not_runnable(env: LlvmEnv):
env.reset("chstone-v0/gsm")
key = "Buildtime"
space = env.observation.spaces[key]
assert isinstance(space.space, Sequence)
assert not space.deterministic
assert space.platform_dependent
assert env.observation[key] is None
def test_is_runnable_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "IsRunnable"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 1
def test_is_runnable_observation_space_not_runnable(env: LlvmEnv):
env.reset("chstone-v0/gsm")
key = "IsRunnable"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 0
def test_is_buildable_observation_space(env: LlvmEnv):
env.reset("cbench-v1/crc32")
key = "IsBuildable"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 1
def test_is_buildable_observation_space_not_buildable(env: LlvmEnv):
env.reset("chstone-v0/gsm")
key = "IsBuildable"
space = env.observation.spaces[key]
assert isinstance(space.space, Scalar)
assert space.deterministic
assert space.platform_dependent
value: int = env.observation[key]
print(value) # For debugging in case of error.
assert isinstance(value, int)
assert value == 0
def test_add_derived_space(env: LlvmEnv):
env.reset()
env.observation.add_derived_space(
id="IrLen",
base_id="Ir",
space=Box(name="IrLen", low=0, high=float("inf"), shape=(1,), dtype=int),
translate=lambda base: [15],
)
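    # The translate callback maps the base "Ir" observation to the derived
    # value; here it ignores the IR and returns a constant for the test.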
value = env.observation["IrLen"]
assert isinstance(value, list)
assert value == [15]
# Repeat the above test using the generated bound method.
value = env.observation.IrLen()
assert isinstance(value, list)
assert value == [15]
def test_derived_space_constructor():
"""Test that derived observation space can be specified at construction
time.
"""
with gym.make("llvm-v0") as env:
env.observation_space = "AutophaseDict"
a = env.reset()
with gym.make("llvm-v0", observation_space="AutophaseDict") as env:
b = env.reset()
assert a == b
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/observation_spaces_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for splitting and merging benchmarks."""
import random
import pytest
from compiler_gym.datasets import Benchmark
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm import llvm_benchmark as llvm
from compiler_gym.errors import BenchmarkInitError
from compiler_gym.validation_result import ValidationResult
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
@pytest.mark.timeout(600)
def test_cbench_split_merge_build(env: LlvmEnv, validatable_cbench_uri: str):
"""Split and then merge a cBench program, checking that the merged program
passes semantics validation.
"""
env.reset(benchmark=validatable_cbench_uri, reward_space="IrInstructionCount")
initial_instruction_count = env.observation.IrInstructionCount()
split = llvm.split_benchmark_by_function(env.benchmark)
merged = llvm.merge_benchmarks(split)
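    # Splitting per function and re-merging should be a semantics-preserving
    # round trip, leaving the instruction count unchanged.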
# Copy over the dynamic configuration to enable runtime semantics
# validation.
merged.proto.dynamic_config.MergeFrom(env.benchmark.proto.dynamic_config)
for cb in env.benchmark.validation_callbacks():
merged.add_validation_callback(cb)
env.reset(benchmark=merged)
assert env.observation.IrInstructionCount() == initial_instruction_count
result: ValidationResult = env.validate()
assert not result.error_details
assert result.reward_validated
assert not result.actions_replay_failed
assert not result.reward_validation_failed
assert result.benchmark_semantics_validated
assert not result.benchmark_semantics_validation_failed
assert result.okay()
def test_cbench_split_globalopt_merge_safe_unsafe_actions(
env: LlvmEnv, action_name: str
):
"""A test which shows that stripping symbols before split+merge causes
invalid results.
"""
safe = action_name not in {"-strip", "-strip-nondebug"}
env.reset(benchmark="benchmark://cbench-v1/sha")
env.step(env.action_space[action_name])
ic = env.observation.IrInstructionCount()
uri = f"benchmark://test-v0/{random.randrange(16**4):04x}"
split = llvm.split_benchmark_by_function(
Benchmark.from_file_contents(uri=uri, data=env.observation.Bitcode().tobytes())
)
def run_globalopt_on_benchmark(benchmark):
env.reset(benchmark=benchmark)
env.step(env.action_space["-globalopt"])
return Benchmark.from_file_contents(
uri=benchmark, data=env.observation.Bitcode().tobytes()
)
split = [run_globalopt_on_benchmark(s) for s in split]
merged = llvm.merge_benchmarks(split)
env.reset(benchmark=merged)
if safe:
assert env.observation.IrInstructionCount() == ic
else:
assert env.observation.IrInstructionCount() != ic
@pytest.mark.parametrize("action_name", ["-strip", "-strip-nondebug"])
def test_cbench_strip_unsafe_for_split(env: LlvmEnv, action_name: str):
"""Sanity check for test_cbench_split_globalopt_merge_safe_unsafe_actions()
above. Run the two strip actions and show that they are safe to use if you
don't split+merge.
"""
env.reset(benchmark="benchmark://cbench-v1/sha")
env.step(env.action_space[action_name])
uri = f"benchmark://test-v0/{random.randrange(16**4):04x}"
split = llvm.split_benchmark_by_function(
Benchmark.from_file_contents(uri=uri, data=env.observation.Bitcode().tobytes())
)
merged = llvm.merge_benchmarks(split)
# Copy over the dynamic config to compile the binary:
merged.proto.dynamic_config.MergeFrom(env.benchmark.proto.dynamic_config)
with pytest.raises(BenchmarkInitError):
env.reset(benchmark=merged)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/split_merge_integration_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
from threading import Thread
from typing import List
import gym
from flaky import flaky
from compiler_gym import CompilerEnv
from compiler_gym.util.gym_type_hints import ActionType
from tests.test_main import main
class ThreadedWorker(Thread):
"""Create an environment and run through a set of actions in a background thread."""
def __init__(self, env_name: str, benchmark: str, actions: List[ActionType]):
super().__init__()
self.done = False
self.env_name = env_name
self.benchmark = benchmark
self.actions = actions
assert actions
def run(self) -> None:
with gym.make(self.env_name, benchmark=self.benchmark) as env:
env.reset()
for action in self.actions:
self.observation, self.reward, done, self.info = env.step(action)
assert not done, self.info["error_details"]
self.done = True
class ThreadedWorkerWithEnv(Thread):
"""Create an environment and run through a set of actions in a background thread."""
def __init__(self, env: CompilerEnv, actions: List[ActionType]):
super().__init__()
self.done = False
self.env = env
self.actions = actions
assert actions
def run(self) -> None:
for action in self.actions:
self.observation, self.reward, done, self.info = self.env.step(action)
assert not done, self.info["error_details"]
self.done = True
@flaky # Timeout may be exceeded if the environment is slow to start.
def test_running_environment_in_background_thread():
"""Test launching and running an LLVM environment in a background thread."""
thread = ThreadedWorker(
env_name="llvm-autophase-ic-v0",
benchmark="cbench-v1/crc32",
actions=[0, 0, 0],
)
thread.start()
thread.join(timeout=10)
assert thread.done
assert thread.observation is not None
assert isinstance(thread.reward, float)
assert thread.info
@flaky # Timeout may be exceeded if the environment is slow to start.
def test_moving_environment_to_background_thread():
"""Test running an LLVM environment from a background thread. The environment
is made in the main thread and used in the background thread.
"""
with gym.make("llvm-autophase-ic-v0") as env:
env.reset(benchmark="cbench-v1/crc32")
thread = ThreadedWorkerWithEnv(env=env, actions=[0, 0, 0])
thread.start()
thread.join(timeout=10)
assert thread.done
assert thread.observation is not None
assert isinstance(thread.reward, float)
assert thread.info
assert env.in_episode
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/threading_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the LLVM environment action space."""
from compiler_gym.envs.llvm.llvm_env import LlvmEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_to_and_from_string_no_actions(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
assert env.action_space.to_string(env.actions) == "opt input.bc -o output.bc"
assert env.action_space.from_string(env.action_space.to_string(env.actions)) == []
def test_to_and_from_string(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
env.step(env.action_space.flags.index("-mem2reg"))
env.step(env.action_space.flags.index("-reg2mem"))
assert (
env.action_space.to_string(env.actions)
== "opt -mem2reg -reg2mem input.bc -o output.bc"
)
assert env.action_space.from_string(env.action_space.to_string(env.actions)) == [
env.action_space.flags.index("-mem2reg"),
env.action_space.flags.index("-reg2mem"),
]
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/action_space_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
import multiprocessing as mp
import sys
from typing import List
import gym
import pytest
from flaky import flaky
from compiler_gym.envs import LlvmEnv
from compiler_gym.util.gym_type_hints import ActionType
from tests.pytest_plugins.common import macos_only
from tests.test_main import main
def process_worker(
env_name: str, benchmark: str, actions: List[ActionType], queue: mp.Queue
):
assert actions
with gym.make(env_name) as env:
env.reset(benchmark=benchmark)
for action in actions:
observation, reward, done, info = env.step(action)
assert not done
queue.put((observation, reward, done, info))
def process_worker_with_env(env: LlvmEnv, actions: List[ActionType], queue: mp.Queue):
assert actions
for action in actions:
observation, reward, done, info = env.step(action)
assert not done
queue.put((env, observation, reward, done, info))
@flaky # Test contains timeouts.
def test_running_environment_in_background_process():
"""Test launching and running an LLVM environment in a background process."""
queue = mp.Queue(maxsize=3)
process = mp.Process(
target=process_worker,
args=("llvm-autophase-ic-v0", "cbench-v1/crc32", [0, 0, 0], queue),
)
process.start()
try:
process.join(timeout=60)
result = queue.get(timeout=60)
observation, reward, done, info = result
assert not done
assert observation is not None
assert isinstance(reward, float)
assert info
finally:
# kill() was added in Python 3.7.
if sys.version_info >= (3, 7, 0):
process.kill()
else:
process.terminate()
process.join(timeout=60)
@macos_only
@pytest.mark.skipif(sys.version_info < (3, 8, 0), reason="Py >= 3.8 only")
def test_moving_environment_to_background_process_macos():
"""Test moving an LLVM environment to a background process."""
queue = mp.Queue(maxsize=3)
with gym.make("llvm-autophase-ic-v0") as env:
env.reset(benchmark="cbench-v1/crc32")
process = mp.Process(
target=process_worker_with_env, args=(env, [0, 0, 0], queue)
)
# Moving an environment to a background process is not supported because
# we are using a subprocess.Popen() to manage the service binary, which
# doesn't support pickling.
with pytest.raises(TypeError):
process.start()
def test_port_collision_test():
"""Test that attempting to connect to a port that is already in use succeeds."""
with gym.make("llvm-autophase-ic-v0") as env_a:
env_a.reset(benchmark="cbench-v1/crc32")
with LlvmEnv(service=env_a.service.connection.url) as env_b:
env_b.reset(benchmark="cbench-v1/crc32")
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/multiprocessing_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests that module and source IDs are stripped in the LLVM modules."""
from compiler_gym.envs.llvm import LlvmEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm", "tests.pytest_plugins.common"]
def test_no_module_id_builtin_benchmark(env: LlvmEnv):
"""Test that the module and source IDs are stripped in shipped benchmark."""
env.reset("cbench-v1/crc32")
ir = env.ir
print(ir) # For debugging in case of error.
assert "; ModuleID = '-'\n" in ir
assert '\nsource_filename = "-"\n' in ir
def test_no_module_id_custom_benchmark(env: LlvmEnv):
"""Test that the module and source IDs are stripped in custom benchmark."""
with open("source.c", "w") as f:
f.write("int A() {return 0;}")
benchmark = env.make_benchmark("source.c")
env.reset(benchmark=benchmark)
ir = env.ir
print(ir) # For debugging in case of error.
assert "; ModuleID = '-'\n" in ir
assert '\nsource_filename = "-"\n' in ir
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/module_id_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the compute_observation() function."""
from pathlib import Path
import networkx.algorithms.isomorphism
import pytest
from compiler_gym.envs.llvm import LlvmEnv, compute_observation
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_invalid_observation_space_name(env: LlvmEnv, tmpdir):
tmpdir = Path(tmpdir)
env.reset()
env.write_bitcode(tmpdir / "ir.bc")
space = env.observation.spaces["Ir"]
space.id = "NotARealName"
with pytest.raises(
ValueError, match="Invalid observation space name: NOT_A_REAL_NAME"
):
compute_observation(space, tmpdir / "ir.bc")
def test_missing_file(env: LlvmEnv, tmpdir):
tmpdir = Path(tmpdir)
env.reset()
with pytest.raises(FileNotFoundError, match=str(tmpdir / "ir.bc")):
compute_observation(env.observation.spaces["Ir"], tmpdir / "ir.bc")
def test_timeout_expired(env: LlvmEnv, tmpdir):
tmpdir = Path(tmpdir)
env.reset(benchmark="cbench-v1/jpeg-c") # larger benchmark
env.write_bitcode(tmpdir / "ir.bc")
space = env.observation.spaces["Programl"]
with pytest.raises(
TimeoutError, match="Failed to compute Programl observation in 0.1 seconds"
):
compute_observation(space, tmpdir / "ir.bc", timeout=0.1)
@pytest.mark.parametrize(
"observation_space", ["Ir", "IrInstructionCount", "ObjectTextSizeBytes"]
)
def test_observation_equivalence(env: LlvmEnv, tmpdir, observation_space: str):
"""Test that compute_observation() produces the same result as the environment."""
tmpdir = Path(tmpdir)
env.reset()
env.write_bitcode(tmpdir / "ir.bc")
observation = compute_observation(
env.observation.spaces[observation_space], tmpdir / "ir.bc"
)
assert observation == env.observation[observation_space]
def test_observation_programl_equivalence(env: LlvmEnv, tmpdir):
"""Test that compute_observation() produces the same result as the environment."""
tmpdir = Path(tmpdir)
env.reset()
env.write_bitcode(tmpdir / "ir.bc")
G = compute_observation(env.observation.spaces["Programl"], tmpdir / "ir.bc")
    assert networkx.algorithms.isomorphism.is_isomorphic(G, env.observation.Programl())
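# Illustrative usage sketch (an assumption, not an original test): compute an
# observation directly from a previously written bitcode file, without stepping
# the environment again.
def compute_ir_from_bitcode_sketch(env: LlvmEnv, bitcode_path: Path):
    env.write_bitcode(bitcode_path)
    return compute_observation(env.observation.spaces["Ir"], bitcode_path, timeout=60)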
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/compute_observation_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
from compiler_gym.envs import CompilerEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_autophase_crc32_feature_vector(env: CompilerEnv):
env.reset(benchmark="cbench-v1/crc32")
print(env.benchmark) # For debugging in case of error.
features = env.observation["AutophaseDict"]
print(features) # For debugging on failure.
assert features == {
"BBNumArgsHi": 0,
"BBNumArgsLo": 0,
"onePred": 16,
"onePredOneSuc": 12,
"onePredTwoSuc": 2,
"oneSuccessor": 16,
"twoPred": 8,
"twoPredOneSuc": 2,
"twoEach": 4,
"twoSuccessor": 8,
"morePreds": 0,
"BB03Phi": 0,
"BBHiPhi": 0,
"BBNoPhi": 29,
"BeginPhi": 0,
"BranchCount": 24,
"returnInt": 9,
"CriticalCount": 2,
"NumEdges": 32,
"const32Bit": 44,
"const64Bit": 41,
"numConstZeroes": 14,
"numConstOnes": 36,
"UncondBranches": 16,
"binaryConstArg": 13,
"NumAShrInst": 0,
"NumAddInst": 5,
"NumAllocaInst": 26,
"NumAndInst": 3,
"BlockMid": 5,
"BlockLow": 24,
"NumBitCastInst": 20,
"NumBrInst": 24,
"NumCallInst": 33,
"NumGetElementPtrInst": 5,
"NumICmpInst": 10,
"NumLShrInst": 3,
"NumLoadInst": 51,
"NumMulInst": 0,
"NumOrInst": 1,
"NumPHIInst": 0,
"NumRetInst": 5,
"NumSExtInst": 0,
"NumSelectInst": 0,
"NumShlInst": 0,
"NumStoreInst": 42,
"NumSubInst": 0,
"NumTruncInst": 1,
"NumXorInst": 8,
"NumZExtInst": 5,
"TotalBlocks": 29,
"TotalInsts": 242,
"TotalMemInst": 157,
"TotalFuncs": 15,
"ArgsPhi": 0,
"testUnary": 103,
}
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/autophase_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
import numpy as np
import pytest
from compiler_gym.envs.llvm.llvm_env import LlvmEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
# Instruction counts for cbench-v1/crc32 benchmark that are used for testing
# reward signals.
CRC32_INSTRUCTION_COUNT = 242
CRC32_INSTRUCTION_COUNT_AFTER_REG2MEM = 249
CRC32_INSTRUCTION_COUNT_O3 = 164
CRC32_INSTRUCTION_COUNT_OZ = 114
def test_instruction_count_reward(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
assert env.observation.IrInstructionCount() == CRC32_INSTRUCTION_COUNT
action = env.action_space.flags.index("-reg2mem")
env.step(action)
assert env.observation.IrInstructionCount() == CRC32_INSTRUCTION_COUNT_AFTER_REG2MEM
ic_diff = CRC32_INSTRUCTION_COUNT - CRC32_INSTRUCTION_COUNT_AFTER_REG2MEM
assert env.reward.IrInstructionCount() == ic_diff
assert env.reward.IrInstructionCountNorm() == ic_diff / CRC32_INSTRUCTION_COUNT
o3_improvement = CRC32_INSTRUCTION_COUNT - CRC32_INSTRUCTION_COUNT_O3
assert env.reward.IrInstructionCountO3() == ic_diff / o3_improvement
oz_improvement = CRC32_INSTRUCTION_COUNT - CRC32_INSTRUCTION_COUNT_OZ
assert env.reward.IrInstructionCountOz() == ic_diff / oz_improvement
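# Worked example of the reward arithmetic exercised above, derived from the
# constants at the top of this file: -reg2mem grows the module from 242 to 249
# instructions, so the step rewards are:
#   IrInstructionCount:     242 - 249        = -7
#   IrInstructionCountNorm: -7 / 242         ~= -0.029
#   IrInstructionCountO3:   -7 / (242 - 164) = -7 / 78
#   IrInstructionCountOz:   -7 / (242 - 114) = -7 / 128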
def test_reward_space(env: LlvmEnv):
env.reward_space = "IrInstructionCount"
assert env.reward_space.name == "IrInstructionCount"
env.reward_space = None
assert env.reward_space is None
invalid = "invalid value"
with pytest.raises(LookupError) as ctx:
env.reward_space = invalid
assert str(ctx.value) == f"Reward space not found: {invalid}"
def test_invalid_reward_space_name(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
invalid = "invalid value"
with pytest.raises(KeyError) as ctx:
_ = env.reward[invalid]
assert str(ctx.value) == f"'{invalid}'"
def test_reward_spaces(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
assert set(env.reward.spaces.keys()) == {
"IrInstructionCount",
"IrInstructionCountNorm",
"IrInstructionCountO3",
"IrInstructionCountOz",
"ObjectTextSizeBytes",
"ObjectTextSizeNorm",
"ObjectTextSizeO3",
"ObjectTextSizeOz",
"TextSizeBytes",
"TextSizeNorm",
"TextSizeO3",
"TextSizeOz",
}
def test_instruction_count_reward_spaces(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
key = "IrInstructionCount"
space = env.reward.spaces[key]
assert str(space) == "IrInstructionCount"
assert env.reward[key] == 0
assert space.range == (-np.inf, np.inf)
assert space.deterministic
assert not space.platform_dependent
assert space.success_threshold is None
assert space.reward_on_error(episode_reward=5) == -5
key = "IrInstructionCountNorm"
space = env.reward.spaces[key]
assert str(space) == "IrInstructionCountNorm"
assert env.reward[key] == 0
assert space.range == (-np.inf, 1.0)
assert space.deterministic
assert not space.platform_dependent
assert space.success_threshold is None
assert space.reward_on_error(episode_reward=5) == -5
key = "IrInstructionCountO3"
space = env.reward.spaces[key]
assert str(space) == "IrInstructionCountO3"
assert env.reward[key] == 0
assert space.range == (-np.inf, np.inf)
assert space.deterministic
assert not space.platform_dependent
assert space.success_threshold == 1
assert space.reward_on_error(episode_reward=5) == -5
key = "IrInstructionCountOz"
space = env.reward.spaces[key]
assert str(space) == "IrInstructionCountOz"
assert env.reward[key] == 0
assert space.range == (-np.inf, np.inf)
assert space.deterministic
assert not space.platform_dependent
assert space.success_threshold == 1
assert space.reward_on_error(episode_reward=5) == -5
def test_object_text_size_reward_spaces(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
key = "ObjectTextSizeBytes"
space = env.reward.spaces[key]
assert str(space) == "ObjectTextSizeBytes"
assert env.reward[key] == 0
assert space.range == (-np.inf, np.inf)
assert space.deterministic
assert space.platform_dependent
assert space.success_threshold is None
assert space.reward_on_error(episode_reward=5) == -5
key = "ObjectTextSizeNorm"
space = env.reward.spaces[key]
assert str(space) == "ObjectTextSizeNorm"
assert env.reward[key] == 0
assert space.range == (-np.inf, 1.0)
assert space.deterministic
assert space.platform_dependent
assert space.success_threshold is None
assert space.reward_on_error(episode_reward=5) == -5
key = "ObjectTextSizeO3"
space = env.reward.spaces[key]
assert str(space) == "ObjectTextSizeO3"
assert env.reward[key] == 0
assert space.range == (-np.inf, np.inf)
assert space.deterministic
assert space.platform_dependent
assert space.success_threshold == 1
assert space.reward_on_error(episode_reward=5) == -5
key = "ObjectTextSizeOz"
space = env.reward.spaces[key]
assert str(space) == "ObjectTextSizeOz"
assert env.reward[key] == 0
assert space.range == (-np.inf, np.inf)
assert space.deterministic
assert space.platform_dependent
assert space.success_threshold == 1
assert space.reward_on_error(episode_reward=5) == -5
def test_text_size_reward_spaces(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
key = "TextSizeBytes"
space = env.reward.spaces[key]
assert str(space) == "TextSizeBytes"
assert env.reward[key] == 0
assert space.range == (-np.inf, np.inf)
assert space.deterministic
assert space.platform_dependent
assert space.success_threshold is None
assert space.reward_on_error(episode_reward=5) == -5
key = "TextSizeNorm"
space = env.reward.spaces[key]
assert str(space) == "TextSizeNorm"
assert env.reward[key] == 0
assert space.range == (-np.inf, 1.0)
assert space.deterministic
assert space.platform_dependent
assert space.success_threshold is None
assert space.reward_on_error(episode_reward=5) == -5
key = "TextSizeO3"
space = env.reward.spaces[key]
assert str(space) == "TextSizeO3"
assert env.reward[key] == 0
assert space.range == (-np.inf, np.inf)
assert space.deterministic
assert space.platform_dependent
assert space.success_threshold == 1
assert space.reward_on_error(episode_reward=5) == -5
key = "TextSizeOz"
space = env.reward.spaces[key]
assert str(space) == "TextSizeOz"
assert env.reward[key] == 0
assert space.range == (-np.inf, np.inf)
assert space.deterministic
assert space.platform_dependent
assert space.success_threshold == 1
assert space.reward_on_error(episode_reward=5) == -5
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/reward_spaces_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/llvm/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
import tempfile
from pathlib import Path
import gym
import pytest
from compiler_gym import CompilerEnvState
from compiler_gym.envs import LlvmEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_validate_state_no_reward():
state = CompilerEnvState(
benchmark="benchmark://cbench-v1/crc32",
walltime=1,
commandline="opt input.bc -o output.bc",
)
with gym.make("llvm-v0") as env:
result = env.validate(state)
assert result.okay()
assert not result.reward_validated
assert str(result) == "✅ cbench-v1/crc32"
def test_validate_state_with_reward():
state = CompilerEnvState(
benchmark="benchmark://cbench-v1/crc32",
walltime=1,
reward=0,
commandline="opt input.bc -o output.bc",
)
with gym.make("llvm-v0", reward_space="IrInstructionCount") as env:
result = env.validate(state)
assert result.okay()
assert result.reward_validated
assert not result.reward_validation_failed
assert str(result) == "✅ cbench-v1/crc32 0.0000"
def test_validate_state_invalid_reward():
state = CompilerEnvState(
benchmark="benchmark://cbench-v1/crc32",
walltime=1,
reward=1,
commandline="opt input.bc -o output.bc",
)
with gym.make("llvm-v0", reward_space="IrInstructionCount") as env:
result = env.validate(state)
assert not result.okay()
assert result.reward_validated
assert result.reward_validation_failed
assert (
str(result) == "❌ cbench-v1/crc32 Expected reward 1.0 but received reward 0.0"
)
def test_validate_state_without_state_reward():
"""Validating state when state has no reward value."""
state = CompilerEnvState(
benchmark="benchmark://cbench-v1/crc32",
walltime=1,
commandline="opt input.bc -o output.bc",
)
with gym.make("llvm-v0", reward_space="IrInstructionCount") as env:
result = env.validate(state)
assert result.okay()
assert not result.reward_validated
assert not result.reward_validation_failed
def test_validate_state_without_env_reward():
"""Validating state when environment has no reward space."""
state = CompilerEnvState(
benchmark="benchmark://cbench-v1/crc32",
walltime=1,
reward=0,
commandline="opt input.bc -o output.bc",
)
with gym.make("llvm-v0") as env:
with pytest.warns(
UserWarning,
match=(
"Validating state with reward, "
"but environment has no reward space set"
),
):
result = env.validate(state)
assert result.okay()
assert not result.reward_validated
assert not result.reward_validation_failed
def test_no_validation_callback_for_custom_benchmark(env: LlvmEnv):
"""Test that a custom benchmark has no validation callback."""
with tempfile.TemporaryDirectory() as d:
p = Path(d) / "example.c"
with open(p, "w") as f:
print("int main() {return 0;}", file=f)
benchmark = env.make_benchmark(p)
env.reset(benchmark=benchmark)
assert not env.benchmark.is_validatable()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/validate_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
import sys
import pytest
from flaky import flaky
from compiler_gym.envs import CompilerEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
@pytest.mark.xfail(
sys.platform == "darwin",
reason="github.com/facebookresearch/CompilerGym/issues/459",
)
@flaky # Runtime can timeout
def test_step(env: CompilerEnv, observation_space: str, reward_space: str):
"""Request every combination of observation and reward in a fresh environment."""
env.reward_space = None
env.observation_space = None
env.reset(benchmark="cbench-v1/crc32")
observation = env.observation[observation_space]
assert observation is not None
reward = env.reward[reward_space]
assert reward is not None
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/fresh_environment_observation_reward_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for LLVM session parameter handlers."""
import pytest
from flaky import flaky
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.errors import ServiceError, SessionNotFound
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_send_param_before_reset(env: LlvmEnv):
"""Test that send_params() before reset() raises an error."""
with pytest.raises(
SessionNotFound, match=r"Must call reset\(\) before send_params\(\)"
):
env.send_params(("test", "test"))
def test_send_param_unknown_key(env: LlvmEnv):
"""Test that send_params() raises an error when the key is not recognized."""
env.reset()
with pytest.raises(ValueError, match="Unknown parameter: unknown.key"):
env.send_params(("unknown.key", ""))
def test_benchmarks_cache_parameters(env: LlvmEnv):
env.reset()
assert int(env.send_param("service.benchmark_cache.get_size_in_bytes", "")) > 0
# Get the default max size.
assert env.send_params(("service.benchmark_cache.get_max_size_in_bytes", "")) == [
str(256 * 1024 * 1024)
]
assert env.send_param( # Same again but using singular API endpoint.
"service.benchmark_cache.get_max_size_in_bytes", ""
) == str(256 * 1024 * 1024)
# Set a new max size.
assert env.send_params(
("service.benchmark_cache.set_max_size_in_bytes", "256")
) == ["256"]
assert env.send_params(("service.benchmark_cache.get_max_size_in_bytes", "")) == [
"256"
]
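# Illustrative helper (an assumption, not part of the original tests): adjust the
# in-service benchmark cache limit using the session parameters exercised above,
# and return the value reported back by the service.
def set_benchmark_cache_limit_sketch(env: LlvmEnv, size_in_bytes: int) -> int:
    env.send_param("service.benchmark_cache.set_max_size_in_bytes", str(size_in_bytes))
    return int(env.send_param("service.benchmark_cache.get_max_size_in_bytes", ""))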
def test_send_param_invalid_reply_count(env: LlvmEnv, mocker):
"""Test that an error is raised when # replies != # params."""
env.reset()
mocker.patch.object(env, "service")
with pytest.raises(
OSError, match="Sent 1 parameter but received 0 responses from the service"
):
env.send_param("param", "")
def test_benchmarks_cache_parameter_invalid_int_type(env: LlvmEnv):
env.reset()
with pytest.raises(ServiceError, match="stoi"):
env.send_params(("service.benchmark_cache.set_max_size_in_bytes", "not an int"))
@flaky # Runtime can timeout.
@pytest.mark.parametrize("n", [1, 3, 10])
def test_runtime_observation_parameters(env: LlvmEnv, n: int):
env.observation_space = "Runtime"
env.reset(benchmark="cbench-v1/qsort")
assert env.send_param("llvm.set_runtimes_per_observation_count", str(n)) == str(n)
assert env.send_param("llvm.get_runtimes_per_observation_count", "") == str(n)
runtimes = env.observation["Runtime"]
assert len(runtimes) == n
assert env.observation_space.contains(runtimes)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/llvm_session_parameters_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
from concurrent.futures import ThreadPoolExecutor
from pathlib import Path
from compiler_gym.third_party import llvm
from compiler_gym.util.runfiles_path import site_data_path
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common"]
def test_download_llvm_threaded_load_test(temporary_environ, tmpwd: Path, mocker):
"""A load test for download_llvm_files() that checks that redundant
downloads are not performed when multiple simultaneous calls to
download_llvm_files() are issued.
"""
mocker.spy(llvm, "_download_llvm_files")
mocker.spy(llvm, "download")
# Force the LLVM download function to run.
llvm._LLVM_DOWNLOADED = False
# Force a temporary new site data path and sanity check it.
temporary_environ["COMPILER_GYM_SITE_DATA"] = str(tmpwd)
assert str(site_data_path(".")).endswith(str(tmpwd))
# Perform a bunch of concurrent calls to download_llvm_files().
with ThreadPoolExecutor() as executor:
futures = [executor.submit(llvm.download_llvm_files) for _ in range(100)]
for future in futures:
future.result()
# For debugging in case of error.
print("Downloads:", llvm._download_llvm_files.call_count) # pylint: disable
for root, _, filenames in os.walk(tmpwd):
print(root)
for filename in filenames:
print(Path(root) / filename)
# Check that the files were unpacked.
assert (tmpwd / "llvm-v0" / "LICENSE").is_file()
assert (tmpwd / "llvm-v0" / "bin" / "clang").is_file()
# Check that the underlying download implementation was only called a single
# time.
assert llvm._download_llvm_files.call_count == 1 # pylint: disable
assert llvm.download.call_count == 1
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/download_llvm_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Test that LlvmEnv is compatible with OpenAI gym interface."""
import gym
import pytest
from compiler_gym.envs.llvm import LlvmEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_type_classes(env: LlvmEnv):
env.observation_space = "Autophase"
env.reward_space = "IrInstructionCount"
env.reset()
assert isinstance(env, gym.Env)
assert isinstance(env, LlvmEnv)
assert isinstance(env.unwrapped, LlvmEnv)
assert isinstance(env.action_space, gym.Space)
assert isinstance(env.observation_space, gym.Space)
assert isinstance(env.reward_range[0], float)
assert isinstance(env.reward_range[1], float)
def test_optional_properties(env: LlvmEnv):
assert "render.modes" in env.metadata
assert env.spec
def test_contextmanager(env: LlvmEnv, mocker):
mocker.spy(env, "close")
assert env.close.call_count == 0
with env:
pass
assert env.close.call_count == 1
def test_contextmanager_gym_make(mocker):
with gym.make("llvm-v0") as env:
mocker.spy(env, "close")
assert env.close.call_count == 0
with env:
pass
assert env.close.call_count == 1
def test_observation_wrapper(env: LlvmEnv):
class WrappedEnv(gym.ObservationWrapper):
def observation(self, observation):
return "Hello"
wrapped = WrappedEnv(env)
observation = wrapped.reset()
assert observation == "Hello"
observation, _, _, _ = wrapped.step(0)
assert observation == "Hello"
def test_reward_wrapper(env: LlvmEnv):
class WrappedEnv(gym.RewardWrapper):
def reward(self, reward):
return 1
wrapped = WrappedEnv(env)
wrapped.reset()
_, reward, _, _ = wrapped.step(0)
assert reward == 1
@pytest.mark.xfail(
reason="github.com/facebookresearch/CompilerGym/issues/587", strict=True
)
def test_env_spec_make(env: LlvmEnv):
"""Test that demonstrates a failure in gym compatibility: env.spec does
not encode mutable state like benchmark, reward space, and observation
space.
"""
env.reset(benchmark="cbench-v1/bitcount")
with env.spec.make() as new_env:
assert new_env.benchmark == env.benchmark
def test_env_spec_make_workaround(env: LlvmEnv):
"""Demonstrate how #587 would be fixed, by updating the 'kwargs' dict."""
env.reset(benchmark="cbench-v1/bitcount")
env.spec._kwargs[ # pylint: disable=protected-access
"benchmark"
] = "cbench-v1/bitcount"
with env.spec.make() as new_env:
assert new_env.benchmark == env.benchmark
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/gym_interface_compatability_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
import re
import tempfile
from pathlib import Path
import pytest
from compiler_gym.datasets import Benchmark
from compiler_gym.envs import CompilerEnv
from compiler_gym.envs.llvm import llvm_benchmark as llvm
from compiler_gym.errors.dataset_errors import BenchmarkInitError
from compiler_gym.service.proto import Benchmark as BenchmarkProto
from compiler_gym.service.proto import File
from tests.pytest_plugins.common import macos_only
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_add_benchmark_invalid_scheme(env: CompilerEnv):
with pytest.raises(ValueError) as ctx:
env.reset(
benchmark=Benchmark(
BenchmarkProto(
uri="benchmark://foo", program=File(uri="https://invalid/scheme")
),
)
)
assert str(ctx.value) == (
"Invalid benchmark data URI. "
'Only the file:/// scheme is supported: "https://invalid/scheme"'
)
def test_add_benchmark_invalid_path(env: CompilerEnv):
with tempfile.TemporaryDirectory() as d:
tmp = Path(d) / "not_a_file"
with pytest.raises(FileNotFoundError) as ctx:
env.reset(benchmark=Benchmark.from_file("benchmark://foo", tmp))
# Use endswith() because on macOS there may be a /private prefix.
assert str(ctx.value).endswith(str(tmp))
def test_get_system_library_flags_not_found():
with pytest.raises(
llvm.HostCompilerFailure, match="Failed to invoke 'not-a-real-binary'"
):
llvm.get_system_library_flags("not-a-real-binary")
def test_get_system_library_flags_nonzero_exit_status():
"""Test that setting the $CXX to an invalid binary raises an error."""
with pytest.raises(llvm.HostCompilerFailure, match="Failed to invoke 'false'"):
llvm.get_system_library_flags("false")
def test_get_system_library_flags_output_parse_failure():
"""Test that setting the $CXX to an invalid binary raises an error."""
with pytest.raises(
llvm.UnableToParseHostCompilerOutput,
match="Failed to parse '#include <...>' search paths from 'echo'",
):
llvm.get_system_library_flags("echo")
def test_get_system_library_flags():
flags = llvm.get_system_library_flags()
assert flags
assert "-isystem" in flags
@macos_only
def test_get_system_library_flags_system_libraries():
flags = llvm.get_system_library_flags()
assert flags
assert flags[-1] == "-L/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/lib"
def test_ClangInvocation_system_libs():
cmd = llvm.ClangInvocation(["foo.c"]).command("a.out")
assert "-isystem" in cmd
def test_ClangInvocation_no_system_libs():
cmd = llvm.ClangInvocation(["foo.c"], system_includes=False).command("a.out")
assert "-isystem" not in cmd
@pytest.mark.parametrize(
"source",
[
"",
"int A() {return 0;}",
"""
int A() {return 0;}
int B() {return A();}
int C() {return 0;}
""",
],
)
@pytest.mark.parametrize("system_includes", [False, True])
def test_make_benchmark_from_source_valid_source(
env: CompilerEnv, source: str, system_includes: bool
):
benchmark = llvm.make_benchmark_from_source(source, system_includes=system_includes)
env.reset(benchmark=benchmark)
@pytest.mark.parametrize(
"source",
[
"@syntax error!!!", # invalid syntax
"int A() {return a;}", # undefined variable
'#include "missing.h"', # missing include
],
)
@pytest.mark.parametrize("system_includes", [False, True])
def test_make_benchmark_from_source_invalid_source(source: str, system_includes: bool):
with pytest.raises(
BenchmarkInitError, match="Failed to make benchmark with compiler error:"
):
llvm.make_benchmark_from_source(source, system_includes=system_includes)
def test_make_benchmark_from_source_invalid_copt():
with pytest.raises(
BenchmarkInitError, match="Failed to make benchmark with compiler error:"
):
llvm.make_benchmark_from_source(
"int A() {return 0;}", copt=["-invalid-argument!"]
)
def test_make_benchmark_from_source_missing_system_includes():
with pytest.raises(
BenchmarkInitError, match="Failed to make benchmark with compiler error:"
):
llvm.make_benchmark_from_source("#include <stdio.h>", system_includes=False)
def test_make_benchmark_from_source_with_system_includes():
assert llvm.make_benchmark_from_source("#include <stdio.h>", system_includes=True)
def test_split_benchmark_by_function_no_functions():
benchmark = llvm.make_benchmark_from_source("")
with pytest.raises(ValueError, match="No functions found"):
llvm.split_benchmark_by_function(benchmark)
def is_defined(signature: str, ir: str):
"""Return whether the function signature is defined in the IR."""
return re.search(f"^define .*{signature}", ir, re.MULTILINE)
def is_declared(signature: str, ir: str):
"""Return whether the function signature is defined in the IR."""
return re.search(f"^declare .*{signature}", ir, re.MULTILINE)
def test_split_benchmark_by_function_repeated_split_single_function(env: CompilerEnv):
benchmark = llvm.make_benchmark_from_source("int A() {return 0;}", lang="c")
for _ in range(10):
benchmarks = llvm.split_benchmark_by_function(benchmark)
assert len(benchmarks) == 2 # global initializers + extracted function
env.reset(benchmark=benchmarks[-1])
assert is_defined("i32 @A()", env.ir)
benchmark = benchmarks[-1]
def test_split_benchmark_by_function_multiple_functions(env: CompilerEnv):
benchmark = llvm.make_benchmark_from_source(
"""
int A() {return 0;}
int B() {return A();}
""",
lang="c",
)
benchmarks = llvm.split_benchmark_by_function(benchmark)
assert len(benchmarks) == 3
_, A, B = benchmarks
env.reset(benchmark=A)
assert is_defined("i32 @A()", env.ir)
assert not is_defined("i32 @B()", env.ir)
assert not is_declared("i32 @A()", env.ir)
assert not is_declared("i32 @B()", env.ir)
env.reset(benchmark=B)
assert not is_defined("i32 @A()", env.ir)
assert is_defined("i32 @B()", env.ir)
assert is_declared("i32 @A()", env.ir)
assert not is_declared("i32 @B()", env.ir)
def test_split_benchmark_by_function_maximum_function_count(env: CompilerEnv):
benchmark = llvm.make_benchmark_from_source(
"""
int A() {return 0;}
int B() {return A();}
""",
lang="c",
)
benchmarks = llvm.split_benchmark_by_function(
benchmark,
maximum_function_count=1,
)
assert len(benchmarks) == 2 # global initializers + extracted function
env.reset(benchmark=benchmarks[1])
assert is_defined("i32 @A()", env.ir)
def test_merge_benchmarks_single_input(env: CompilerEnv):
A = llvm.make_benchmark_from_source("int A() {return 0;}", lang="c")
merged = llvm.merge_benchmarks([A])
env.reset(benchmark=merged)
assert is_defined("i32 @A()", env.ir)
def test_merge_benchmarks_independent(env: CompilerEnv):
A = llvm.make_benchmark_from_source("int A() {return 0;}", lang="c")
B = llvm.make_benchmark_from_source("int B() {return 0;}", lang="c")
merged = llvm.merge_benchmarks([A, B])
env.reset(benchmark=merged)
assert is_defined("i32 @A()", env.ir)
assert is_defined("i32 @B()", env.ir)
def test_merge_benchmarks_multiply_defined():
A = llvm.make_benchmark_from_source("int A() {return 0;}", lang="c")
with pytest.raises(ValueError, match="symbol multiply defined"):
llvm.merge_benchmarks([A, A])
def test_merge_benchmarks_declarations(env: CompilerEnv):
A = llvm.make_benchmark_from_source("int A() {return 0;}", lang="c")
B = llvm.make_benchmark_from_source("int A(); int B() {return A();}", lang="c")
merged = llvm.merge_benchmarks([A, B])
env.reset(benchmark=merged)
assert is_defined("i32 @A()", env.ir)
assert is_defined("i32 @B()", env.ir)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/llvm_benchmark_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Regression tests generated by fuzzing."""
import difflib
import subprocess
import pytest
from tests.test_main import main
pytest_plugins = [
"tests.pytest_plugins.llvm",
"tests.pytest_plugins.common",
]
@pytest.mark.xfail(reason="-separate-const-offset-from-gep", strict=True)
def test_regression_test_const_offset_from_gep(env, tmpwd, llvm_diff, llvm_opt):
env.reset(benchmark="benchmark://cbench-v1/blowfish")
env.write_ir("input.ll")
# FIXME: Removing the -separate-const-offset-from-gep actions from the below
# commandline "fixes" the test.
actions = env.action_space.from_string(
"opt -objc-arc-apelim -separate-const-offset-from-gep -sancov -indvars -loop-reduce -dse -inferattrs -loop-fusion -dce -break-crit-edges -constmerge -indvars -mem2reg -objc-arc-expand -ee-instrument -loop-reroll -break-crit-edges -separate-const-offset-from-gep -loop-idiom -float2int -dce -float2int -ipconstprop -simple-loop-unswitch -coro-cleanup -early-cse-memssa -strip -functionattrs -objc-arc-contract -sink -loop-distribute -loop-reroll -slsr -separate-const-offset-from-gep input.bc -o output.bc"
)
for action in actions:
_, _, done, info = env.step(action)
assert not done, info["error_details"]
env.write_ir("env.ll")
subprocess.check_call(
env.action_space.to_string(env.actions) + " -S -o output.ll",
env={"PATH": str(llvm_opt.parent)},
shell=True,
timeout=60,
)
with open("output.ll") as f1, open("env.ll") as f2:
for line in difflib.unified_diff(f1.readlines()[1:], f2.readlines()[1:]):
subprocess.check_output(
[
llvm_diff,
"output.ll",
]
)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/fuzzing_regression_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Regression tests for LlvmEnv.fork() identified by hand or through fuzzing."""
from typing import List, NamedTuple
import pytest
from flaky import flaky
import compiler_gym
from compiler_gym.envs import LlvmEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
class ForkRegressionTest(NamedTuple):
benchmark: str
pre_fork: str
post_fork: str
reward_space: str = "IrInstructionCount"
# A list of testcases where we have identified the parent and child environment
# states differing after forking and running identical actions on both.
#
# NOTE(cummins): To submit a new testcase, run the
# "minimize_fork_regression_testcase()" function below to produce a minimal
# reproducible example and add it to this list.
MINIMIZED_FORK_REGRESSION_TESTS: List[ForkRegressionTest] = [
ForkRegressionTest(
benchmark="benchmark://cbench-v1/tiff2bw",
pre_fork="-globalopt",
post_fork="-gvn-hoist",
reward_space="IrInstructionCount",
),
ForkRegressionTest(
benchmark="benchmark://cbench-v1/bzip2",
pre_fork="-mem2reg",
post_fork="-loop-guard-widening",
reward_space="IrInstructionCount",
),
ForkRegressionTest(
benchmark="benchmark://cbench-v1/jpeg-d",
pre_fork="-sroa",
post_fork="-loop-rotate",
reward_space="IrInstructionCount",
),
ForkRegressionTest(
benchmark="benchmark://cbench-v1/qsort",
pre_fork="-simplifycfg -newgvn -instcombine -break-crit-edges -gvn -inline",
post_fork="-lcssa",
reward_space="IrInstructionCount",
),
ForkRegressionTest(
benchmark="benchmark://poj104-v1/101/859",
pre_fork="-licm",
post_fork="-loop-rotate",
reward_space="IrInstructionCount",
),
]
@flaky
@pytest.mark.parametrize("test", MINIMIZED_FORK_REGRESSION_TESTS)
def test_fork_regression_test(env: LlvmEnv, test: ForkRegressionTest):
"""Run the fork regression test:
1. Initialize an environment.
2. Apply a "pre_fork" sequence of actions.
3. Create a fork of the environment.
4. Apply a "post_fork" sequence of actions in both the fork and parent.
5. Verify that the environment states have gone out of sync.
"""
env.reward_space = test.reward_space
env.reset(test.benchmark)
pre_fork = [env.action_space[f] for f in test.pre_fork.split()]
post_fork = [env.action_space[f] for f in test.post_fork.split()]
_, _, done, info = env.multistep(pre_fork)
assert not done, info
with env.fork() as fkd:
assert env.state == fkd.state # Sanity check
env.multistep(post_fork)
fkd.multistep(post_fork)
# Verify that the environment states no longer line up.
assert env.state != fkd.state
# Utility function for generating test cases. Copy this code into a standalone
# script and call the function on your test case. It will print a minimized
# version of it.
def minimize_fork_regression_testcase(test: ForkRegressionTest):
def _check_hypothesis(
benchmark: str, pre_fork: List[int], post_fork: List[int]
) -> bool:
with compiler_gym.make("llvm-v0", reward_space="IrInstructionCount") as env:
env.reset(benchmark)
_, _, done, info = env.multistep(pre_fork)
assert not done, info # Sanity check
with env.fork() as fkd:
assert env.state == fkd.state # Sanity check
env.multistep(post_fork)
fkd.multistep(post_fork)
return env.state != fkd.state
with compiler_gym.make("llvm-v0", reward_space=test.reward_space) as env:
pre_fork = [env.action_space[f] for f in test.pre_fork.split()]
post_fork = [env.action_space[f] for f in test.post_fork.split()]
pre_fork_mask = [True] * len(pre_fork)
post_fork_mask = [True] * len(post_fork)
print("Minimizing the pre-fork actions list")
for i in range(len(pre_fork)):
pre_fork_mask[i] = False
masked_pre_fork = [p for p, m in zip(pre_fork, pre_fork_mask) if m]
if _check_hypothesis(test.benchmark, masked_pre_fork, post_fork):
print(
f"Removed pre-fork action {env.action_space.names[pre_fork[i]]}, {sum(pre_fork_mask)} remaining"
)
else:
pre_fork_mask[i] = True
pre_fork = [p for p, m in zip(pre_fork, pre_fork_mask) if m]
print("Minimizing the post-fork actions list")
for i in range(len(post_fork)):
post_fork_mask[i] = False
masked_post_fork = [p for p, m in zip(post_fork, post_fork_mask) if m]
if _check_hypothesis(test.benchmark, pre_fork, masked_post_fork):
print(
f"Removed post-fork action {env.action_space.names[post_fork[i]]}, {sum(post_fork_mask)} remaining"
)
else:
                post_fork_mask[i] = True
post_fork = [p for p, m in zip(post_fork, post_fork_mask) if m]
pre_fork = " ".join(env.action_space.names[p] for p in pre_fork)
post_fork = " ".join(env.action_space.names[p] for p in post_fork)
return ForkRegressionTest(
benchmark=test.benchmark,
pre_fork=pre_fork,
post_fork=post_fork,
reward_space=test.reward_space,
)
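# Illustrative usage of the minimizer above (assumed to be run from a standalone
# script rather than under pytest):
#
#   minimized = minimize_fork_regression_testcase(
#       ForkRegressionTest(
#           benchmark="benchmark://cbench-v1/qsort",
#           pre_fork="-simplifycfg -newgvn -instcombine -break-crit-edges -gvn -inline",
#           post_fork="-lcssa",
#           reward_space="IrInstructionCount",
#       )
#   )
#   print(minimized)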
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/fork_regression_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for LlvmEnv.fork()."""
import subprocess
import sys
import pytest
from compiler_gym.envs import LlvmEnv
from compiler_gym.util.runfiles_path import runfiles_path
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
EXAMPLE_BITCODE_FILE = runfiles_path(
"compiler_gym/third_party/cbench/cbench-v1/crc32.bc"
)
EXAMPLE_BITCODE_IR_INSTRUCTION_COUNT = 196
def test_with_statement(env: LlvmEnv):
"""Test that the `with` statement context manager works on forks."""
env.reset("cbench-v1/crc32")
env.step(0)
with env.fork() as fkd:
assert fkd.in_episode
assert fkd.actions == [0]
assert not fkd.in_episode
assert env.in_episode
def test_fork_child_process_is_not_orphaned(env: LlvmEnv):
env.reset("cbench-v1/crc32")
with env.fork() as fkd:
# Check that both environments share the same service.
assert isinstance(env.service.connection.process, subprocess.Popen)
assert isinstance(fkd.service.connection.process, subprocess.Popen)
assert env.service.connection.process.pid == fkd.service.connection.process.pid
process = env.service.connection.process
# Sanity check that both services are alive.
assert not env.service.connection.process.poll()
assert not fkd.service.connection.process.poll()
# Close the parent service.
env.close()
# Check that the service is still alive.
assert not env.service
assert not fkd.service.connection.process.poll()
# Close the forked service.
fkd.close()
# Check that the service has been killed.
assert process.poll() is not None
def test_fork_chain_child_processes_are_not_orphaned(env: LlvmEnv):
env.reset("cbench-v1/crc32")
# Create a chain of forked environments.
a = env.fork()
b = a.fork()
c = b.fork()
d = c.fork()
try:
# Sanity check that they share the same underlying service.
assert (
env.service.connection.process
== a.service.connection.process
== b.service.connection.process
== c.service.connection.process
== d.service.connection.process
)
proc = env.service.connection.process
# Kill the forked environments one by one.
a.close()
assert proc.poll() is None
b.close()
assert proc.poll() is None
c.close()
assert proc.poll() is None
d.close()
assert proc.poll() is None
# Kill the final environment, refcount 0, service is closed.
env.close()
assert proc.poll() is not None
finally:
a.close()
b.close()
c.close()
d.close()
def test_fork_before_reset(env: LlvmEnv):
"""Test that fork() before reset() starts an episode."""
assert not env.in_episode
with env.fork() as fkd:
assert env.in_episode
assert fkd.in_episode
def test_fork_closed_service(env: LlvmEnv):
env.reset(benchmark="cbench-v1/crc32")
_, _, done, _ = env.step(0)
assert not done
assert env.actions == [0]
env.close()
assert not env.service
with env.fork() as fkd:
assert env.actions == [0]
assert fkd.actions == [0]
def test_fork_spaces_are_same(env: LlvmEnv):
env.observation_space = "Autophase"
env.reward_space = "IrInstructionCount"
env.reset(benchmark="cbench-v1/crc32")
with env.fork() as fkd:
assert fkd.observation_space == env.observation_space
assert fkd.reward_space == env.reward_space
assert fkd.benchmark == env.benchmark
def test_fork_state(env: LlvmEnv):
env.reset("cbench-v1/crc32")
env.step(0)
assert env.actions == [0]
with env.fork() as fkd:
        assert fkd.benchmark == env.benchmark
assert fkd.actions == env.actions
def test_fork_reset(env: LlvmEnv):
env.reset("cbench-v1/crc32")
env.step(0)
env.step(1)
env.step(2)
with env.fork() as fkd:
fkd.step(3)
assert env.actions == [0, 1, 2]
assert fkd.actions == [0, 1, 2, 3]
fkd.reset()
assert env.actions == [0, 1, 2]
assert fkd.actions == []
def test_fork_custom_benchmark(env: LlvmEnv):
benchmark = env.make_benchmark(EXAMPLE_BITCODE_FILE)
env.reset(benchmark=benchmark)
def ir(env):
"""Strip the ModuleID line from IR."""
return "\n".join(env.ir.split("\n")[1:])
with env.fork() as fkd:
assert ir(env) == ir(fkd)
fkd.reset()
assert ir(env) == ir(fkd)
def test_fork_twice_test(env: LlvmEnv):
"""Test that fork() on a forked environment works."""
env.reset(benchmark="cbench-v1/crc32")
with env.fork() as fork_a:
with fork_a.fork() as fork_b:
assert env.state == fork_a.state
assert fork_a.state == fork_b.state
def test_fork_modified_ir_is_the_same(env: LlvmEnv):
"""Test that the IR of a forked environment is the same."""
env.reset("cbench-v1/crc32")
# Apply an action that modifies the benchmark.
_, _, done, info = env.step(env.action_space.flags.index("-mem2reg"))
assert not done
assert not info["action_had_no_effect"]
with env.fork() as fkd:
assert "\n".join(env.ir.split("\n")[1:]) == "\n".join(fkd.ir.split("\n")[1:])
# Apply another action.
_, _, done, info = env.step(env.action_space.flags.index("-gvn"))
_, _, done, info = fkd.step(fkd.action_space.flags.index("-gvn"))
assert not done
assert not info["action_had_no_effect"]
# Check that IRs are still equivalent.
assert "\n".join(env.ir.split("\n")[1:]) == "\n".join(fkd.ir.split("\n")[1:])
@pytest.mark.xfail(
sys.platform == "darwin",
reason="github.com/facebookresearch/CompilerGym/issues/459",
)
def test_fork_rewards(env: LlvmEnv, reward_space: str):
"""Test that rewards are equal after fork() is called."""
env.reward_space = reward_space
env.reset("cbench-v1/dijkstra")
actions = [env.action_space.flags.index(n) for n in ["-mem2reg", "-simplifycfg"]]
forked = env.fork()
try:
for action in actions:
_, env_reward, env_done, _ = env.step(action)
_, fkd_reward, fkd_done, _ = forked.step(action)
assert env_done is False
assert fkd_done is False
assert env_reward == fkd_reward
finally:
forked.close()
def test_fork_previous_cost_reward_update(env: LlvmEnv):
env.reward_space = "IrInstructionCount"
env.reset("cbench-v1/crc32")
env.step(env.action_space.flags.index("-mem2reg"))
with env.fork() as fkd:
_, a, _, _ = env.step(env.action_space.flags.index("-mem2reg"))
_, b, _, _ = fkd.step(env.action_space.flags.index("-mem2reg"))
assert a == b
def test_fork_previous_cost_lazy_reward_update(env: LlvmEnv):
env.reset("cbench-v1/crc32")
env.step(env.action_space.flags.index("-mem2reg"))
env.reward["IrInstructionCount"]
with env.fork() as fkd:
env.step(env.action_space.flags.index("-mem2reg"))
fkd.step(env.action_space.flags.index("-mem2reg"))
assert env.reward["IrInstructionCount"] == fkd.reward["IrInstructionCount"]
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/fork_env_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for LLVM runtime support."""
from pathlib import Path
from typing import List
import numpy as np
import pytest
from flaky import flaky
from compiler_gym.envs.llvm import LlvmEnv, llvm_benchmark
from compiler_gym.spaces.reward import Reward
from compiler_gym.util.gym_type_hints import ActionType, ObservationType
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
@pytest.mark.parametrize("runtime_observation_count", [1, 3, 5])
def test_custom_benchmark_runtime(env: LlvmEnv, tmpdir, runtime_observation_count: int):
env.reset()
env.runtime_observation_count = runtime_observation_count
with open(tmpdir / "program.c", "w") as f:
f.write(
"""
#include <stdio.h>
int main(int argc, char** argv) {
printf("Hello\\n");
for (int i = 0; i < 10; ++i) {
argc += 2;
}
return argc - argc;
}
"""
)
benchmark = env.make_benchmark(Path(tmpdir) / "program.c")
benchmark.proto.dynamic_config.build_cmd.argument.extend(
["$CC", "$IN"] + llvm_benchmark.get_system_library_flags()
)
benchmark.proto.dynamic_config.build_cmd.outfile.extend(["a.out"])
benchmark.proto.dynamic_config.build_cmd.timeout_seconds = 10
benchmark.proto.dynamic_config.run_cmd.argument.extend(["./a.out"])
benchmark.proto.dynamic_config.run_cmd.timeout_seconds = 10
env.reset(benchmark=benchmark)
runtimes = env.observation.Runtime()
assert len(runtimes) == runtime_observation_count
assert np.all(runtimes > 0)
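# Illustrative helper (an assumption, mirroring the setup above): attach a
# build/run dynamic configuration to a custom benchmark so that the Runtime
# observation space can compile and execute it.
def make_runnable_benchmark_sketch(env: LlvmEnv, source_path: Path):
    benchmark = env.make_benchmark(source_path)
    benchmark.proto.dynamic_config.build_cmd.argument.extend(
        ["$CC", "$IN"] + llvm_benchmark.get_system_library_flags()
    )
    benchmark.proto.dynamic_config.build_cmd.outfile.extend(["a.out"])
    benchmark.proto.dynamic_config.build_cmd.timeout_seconds = 10
    benchmark.proto.dynamic_config.run_cmd.argument.extend(["./a.out"])
    benchmark.proto.dynamic_config.run_cmd.timeout_seconds = 10
    return benchmark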
@flaky
def test_custom_benchmark_runtimes_differ(env: LlvmEnv, tmpdir):
"""Same as above, but test that runtimes differ from run to run."""
env.reset()
env.runtime_observation_count = 10
with open(tmpdir / "program.c", "w") as f:
f.write(
"""
#include <stdio.h>
int main(int argc, char** argv) {
printf("Hello\\n");
for (int i = 0; i < 10; ++i) {
argc += 2;
}
return argc - argc;
}
"""
)
benchmark = env.make_benchmark(Path(tmpdir) / "program.c")
benchmark.proto.dynamic_config.build_cmd.argument.extend(
["$CC", "$IN"] + llvm_benchmark.get_system_library_flags()
)
benchmark.proto.dynamic_config.build_cmd.outfile.extend(["a.out"])
benchmark.proto.dynamic_config.build_cmd.timeout_seconds = 10
benchmark.proto.dynamic_config.run_cmd.argument.extend(["./a.out"])
benchmark.proto.dynamic_config.run_cmd.timeout_seconds = 10
env.reset(benchmark=benchmark)
runtimes_a = env.observation.Runtime()
runtimes_b = env.observation.Runtime()
assert not np.all(runtimes_a == runtimes_b)
def test_invalid_runtime_count(env: LlvmEnv):
env.reset()
with pytest.raises(
ValueError, match=r"runtimes_per_observation_count must be >= 1"
):
env.runtime_observation_count = 0
with pytest.raises(
ValueError, match=r"runtimes_per_observation_count must be >= 1"
):
env.runtime_observation_count = -1
def test_runtime_observation_count_before_reset(env: LlvmEnv):
"""Test setting property before reset() is called."""
env.runtime_observation_count = 10
assert env.runtime_observation_count == 10
env.reset()
assert env.runtime_observation_count == 10
def test_runtime_warmup_runs_count_before_reset(env: LlvmEnv):
"""Test setting property before reset() is called."""
env.runtime_warmup_runs_count = 10
assert env.runtime_warmup_runs_count == 10
env.reset()
assert env.runtime_warmup_runs_count == 10
def test_runtime_observation_count_fork(env: LlvmEnv):
"""Test that custom count properties propagate on fork()."""
env.runtime_observation_count = 2
env.runtime_warmup_runs_count = 1
with env.fork() as fkd:
assert fkd.runtime_observation_count == 2
assert fkd.runtime_warmup_runs_count == 1
env.reset()
with env.fork() as fkd:
assert fkd.runtime_observation_count == 2
assert fkd.runtime_warmup_runs_count == 1
def test_default_runtime_observation_count_fork(env: LlvmEnv):
"""Test that default property values propagate on fork()."""
env.reset()
rc = env.runtime_observation_count
wc = env.runtime_warmup_runs_count
with env.fork() as fkd:
assert fkd.runtime_observation_count == rc
assert fkd.runtime_warmup_runs_count == wc
class RewardDerivedFromRuntime(Reward):
"""A custom reward space that is derived from the Runtime observation space."""
def __init__(self):
super().__init__(
name="runtimeseries",
observation_spaces=["Runtime"],
default_value=0,
min=None,
max=None,
default_negates_returns=True,
deterministic=False,
platform_dependent=True,
)
self.last_runtime_observation: List[float] = None
def reset(self, benchmark, observation_view) -> None:
del benchmark # unused
self.last_runtime_observation = observation_view["Runtime"]
def update(
self,
actions: List[ActionType],
observations: List[ObservationType],
observation_view,
) -> float:
del actions # unused
del observation_view # unused
self.last_runtime_observation = observations[0]
return 0
@flaky # runtime may fail
@pytest.mark.parametrize("runtime_observation_count", [1, 3, 5])
def test_correct_number_of_observations_during_reset(
env: LlvmEnv, runtime_observation_count: int
):
env.reward.add_space(RewardDerivedFromRuntime())
env.runtime_observation_count = runtime_observation_count
env.reset(reward_space="runtimeseries")
assert env.runtime_observation_count == runtime_observation_count
    # Check that the number of observations received during reset() matches
    # the number that was asked for.
assert (
len(env.reward.spaces["runtimeseries"].last_runtime_observation)
== runtime_observation_count
)
    # Check that the number of observations received during step() matches
    # the number that was asked for.
env.reward.spaces["runtimeseries"].last_runtime_observation = None
env.step(0)
assert (
len(env.reward.spaces["runtimeseries"].last_runtime_observation)
== runtime_observation_count
)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/runtime_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integrations tests for the LLVM CompilerGym environments."""
import numpy as np
from compiler_gym.envs import CompilerEnv
from compiler_gym.errors import ServiceError
from compiler_gym.third_party.autophase import AUTOPHASE_FEATURE_DIM
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
def test_step(env: CompilerEnv, action_name: str):
"""Run each action on a single benchmark."""
env.reward_space = "IrInstructionCount"
env.observation_space = "Autophase"
env.reset(benchmark="cbench-v1/crc32")
action = env.action_space.from_string(action_name)[0]
observation, reward, done, _ = env.step(action)
assert isinstance(observation, np.ndarray)
assert observation.shape == (AUTOPHASE_FEATURE_DIM,)
assert isinstance(reward, float)
assert isinstance(done, bool)
try:
env.close()
except ServiceError as e:
# env.close() will raise an error if the service terminated
# ungracefully. In that case, the "done" flag should have been set.
assert done, f"Service error was raised when 'done' flag not set: {e}"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/all_actions_single_step_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for serializing LLVM datasets."""
import pickle
from compiler_gym.datasets import Dataset
from compiler_gym.envs.llvm import LlvmEnv
from tests.pytest_plugins.common import ci_only, skip_on_ci
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
# Installing all datasets on CI is expensive. Skip these tests; we define
# smaller versions of them below.
@skip_on_ci
def test_pickle_dataset(dataset: Dataset):
"""Test that datasets can be pickled."""
assert pickle.loads(pickle.dumps(dataset)) == dataset
@skip_on_ci
def test_pickle_benchmark(dataset: Dataset):
"""Test that benchmarks can be pickled."""
benchmark = next(dataset.benchmarks())
assert pickle.loads(pickle.dumps(benchmark))
# Smaller versions of the above tests for CI.
@ci_only
def test_pickle_cbench_dataset(env: LlvmEnv):
"""Test that datasets can be pickled."""
dataset = env.datasets["benchmark://cbench-v1"]
assert pickle.loads(pickle.dumps(dataset)) == dataset
@ci_only
def test_pickle_cbench_benchmark(env: LlvmEnv):
"""Test that benchmarks can be pickled."""
dataset = env.datasets["benchmark://cbench-v1"]
benchmark = next(dataset.benchmarks())
assert pickle.loads(pickle.dumps(benchmark))
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets_pickle_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the LLVM datasets."""
import gym
import compiler_gym.envs.llvm # noqa register environments
from tests.test_main import main
def test_default_dataset_list():
with gym.make("llvm-v0") as env:
assert list(d.name for d in env.datasets) == [
"benchmark://cbench-v1",
"benchmark://anghabench-v1",
"benchmark://blas-v0",
"benchmark://chstone-v0",
"benchmark://clgen-v0",
"benchmark://github-v0",
"benchmark://jotaibench-v0",
"benchmark://linux-v0",
"benchmark://mibench-v1",
"benchmark://npb-v0",
"benchmark://opencv-v0",
"benchmark://poj104-v1",
"benchmark://tensorflow-v0",
"generator://csmith-v0",
"generator://llvm-stress-v0",
]
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/llvm_datasets_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the POJ104 dataset."""
import sys
from itertools import islice
from pathlib import Path
import gym
import pytest
import compiler_gym.envs.llvm # noqa register environments
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm.datasets import POJ104Dataset
from compiler_gym.errors import BenchmarkInitError
from tests.pytest_plugins.common import skip_on_ci
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.llvm"]
@pytest.fixture(scope="module")
def poj104_dataset() -> POJ104Dataset:
with gym.make("llvm-v0") as env:
ds = env.datasets["poj104-v1"]
yield ds
def test_poj104_size(poj104_dataset: POJ104Dataset):
if sys.platform == "darwin":
assert poj104_dataset.size == 49815
else:
assert poj104_dataset.size == 49816
@skip_on_ci
@pytest.mark.parametrize("index", range(100))
def test_poj104_random_select(
env: LlvmEnv, poj104_dataset: POJ104Dataset, index: int, tmpwd: Path
):
uri = next(islice(poj104_dataset.benchmark_uris(), index, None))
benchmark = poj104_dataset.benchmark(uri)
env.reset(benchmark=benchmark)
assert benchmark.source
benchmark.write_sources_to_directory(tmpwd)
assert (tmpwd / "source.cc").is_file()
@skip_on_ci
def test_poj104_random_benchmark(env: LlvmEnv, poj104_dataset: POJ104Dataset):
benchmark = poj104_dataset.random_benchmark()
env.reset(benchmark=benchmark)
assert benchmark.source
@pytest.mark.parametrize(
"uri",
[
"benchmark://poj104-v1/1/1486",
],
)
def test_poj104_known_bad_bitcodes(env: LlvmEnv, uri: str):
# This test is intentionally structured in a way that if the benchmark does
# not raise an error, it still passes.
try:
env.reset(benchmark=uri)
except BenchmarkInitError as e:
assert "Failed to parse LLVM bitcode" in str(e)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/poj104_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the GitHub dataset."""
import sys
from itertools import islice
import gym
import pytest
import compiler_gym.envs.llvm # noqa register environments
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm.datasets import GitHubDataset
from tests.pytest_plugins.common import skip_on_ci
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.llvm"]
@pytest.fixture(scope="module")
def github_dataset() -> GitHubDataset:
with gym.make("llvm-v0") as env:
ds = env.datasets["github-v0"]
yield ds
def test_github_size(github_dataset: GitHubDataset):
if sys.platform == "linux":
assert github_dataset.size == 49738
else:
assert github_dataset.size == 47806
@skip_on_ci
@pytest.mark.parametrize("index", range(250))
def test_github_random_select(env: LlvmEnv, github_dataset: GitHubDataset, index: int):
uri = next(islice(github_dataset.benchmark_uris(), index, None))
benchmark = github_dataset.benchmark(uri)
env.reset(benchmark=benchmark)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/github_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the AnghaBench dataset."""
import gym
import pytest
import compiler_gym.envs.llvm # noqa register environments
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm.datasets import CHStoneDataset, chstone
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.llvm"]
@pytest.fixture(scope="module")
def chstone_dataset() -> CHStoneDataset:
with gym.make("llvm-v0") as env:
ds = env.datasets["chstone-v0"]
yield ds
def test_chstone_size(chstone_dataset: CHStoneDataset):
assert chstone_dataset.size == 12
def test_missing_benchmark_name(chstone_dataset: CHStoneDataset, mocker):
# Mock install() so that on CI it doesn't download and unpack the tarfile.
mocker.patch.object(chstone_dataset, "install")
with pytest.raises(
LookupError, match=r"^No benchmark specified: benchmark://chstone-v0$"
):
chstone_dataset.benchmark("benchmark://chstone-v0")
chstone_dataset.install.assert_called_once()
with pytest.raises(
LookupError, match=r"^No benchmark specified: benchmark://chstone-v0/$"
):
chstone_dataset.benchmark("benchmark://chstone-v0/")
assert chstone_dataset.install.call_count == 2
@pytest.mark.parametrize("uri", chstone.URIS)
def test_chstone_benchmark_reset(
env: LlvmEnv, chstone_dataset: CHStoneDataset, uri: str
):
env.reset(chstone_dataset.benchmark(uri))
assert env.benchmark == uri
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/chstone_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Test for cBench semantics validation."""
import pytest
from compiler_gym import ValidationResult
from compiler_gym.envs.llvm import LlvmEnv
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.llvm"]
@pytest.mark.timeout(600)
def test_validate_benchmark_semantics(env: LlvmEnv, validatable_cbench_uri: str):
"""Run the validation routine on all benchmarks."""
env.reward_space = "IrInstructionCount"
env.reset(benchmark=validatable_cbench_uri)
# Run a single step.
env.step(env.action_space.flags.index("-mem2reg"))
# Validate the environment state.
result: ValidationResult = env.validate()
assert not result.error_details
assert result.reward_validated
assert not result.actions_replay_failed
assert not result.reward_validation_failed
assert result.benchmark_semantics_validated
assert not result.benchmark_semantics_validation_failed
assert result.okay()
@pytest.mark.timeout(600)
def test_non_validatable_benchmark_validate(
env: LlvmEnv, non_validatable_cbench_uri: str
):
"""Run the validation routine on all benchmarks."""
env.reward_space = "IrInstructionCount"
env.reset(benchmark=non_validatable_cbench_uri)
# Run a single step.
env.step(env.action_space.flags.index("-mem2reg"))
# Validate the environment state.
result: ValidationResult = env.validate()
assert not result.error_details
assert result.reward_validated
assert not result.actions_replay_failed
assert not result.reward_validation_failed
assert not result.benchmark_semantics_validated
assert not result.benchmark_semantics_validation_failed
assert result.okay()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/cbench_validate_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/llvm/datasets/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the AnghaBench dataset."""
import sys
from itertools import islice
import gym
import numpy as np
import pytest
import compiler_gym.envs.llvm # noqa register environments
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm.datasets import LlvmStressDataset
from compiler_gym.errors import BenchmarkInitError
from tests.pytest_plugins.common import is_ci
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.llvm"]
@pytest.fixture(scope="module")
def llvm_stress_dataset() -> LlvmStressDataset:
with gym.make("llvm-v0") as env:
ds = env.datasets["generator://llvm-stress-v0"]
yield ds
def test_llvm_stress_size(llvm_stress_dataset: LlvmStressDataset):
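    # llvm-stress benchmarks are generated on demand, so the dataset does not
    # report a fixed size.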
assert llvm_stress_dataset.size == 0
assert len(llvm_stress_dataset) == 0
@pytest.mark.parametrize("index", range(3) if is_ci() else range(250))
def test_llvm_stress_random_select(
env: LlvmEnv, llvm_stress_dataset: LlvmStressDataset, index: int
):
env.observation_space = "InstCountDict"
uri = next(islice(llvm_stress_dataset.benchmark_uris(), index, None))
benchmark = llvm_stress_dataset.benchmark(uri)
# As of the current version (LLVM 10.0.0), programs generated with the
# following seeds emit an error when compiled: "Cannot emit physreg copy
# instruction".
FAILING_SEEDS = {"linux": {173, 239}, "darwin": {173}}[sys.platform]
if index in FAILING_SEEDS:
with pytest.raises(
BenchmarkInitError, match="Cannot emit physreg copy instruction"
):
env.reset(benchmark=benchmark)
else:
instcount = env.reset(benchmark=benchmark)
print(env.ir) # For debugging in case of error.
assert instcount["TotalInstsCount"] > 0
def test_random_benchmark(llvm_stress_dataset: LlvmStressDataset):
num_benchmarks = 5
rng = np.random.default_rng(0)
random_benchmarks = {
b.uri
for b in (
llvm_stress_dataset.random_benchmark(rng) for _ in range(num_benchmarks)
)
}
assert len(random_benchmarks) == num_benchmarks
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/llvm_stress_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the JotaiBench dataset."""
from itertools import islice
from pathlib import Path
import pytest
import compiler_gym
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm.datasets import JotaiBenchDataset
from tests.pytest_plugins.common import skip_on_ci
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.llvm"]
@pytest.fixture(scope="module")
def jotaibench_dataset() -> JotaiBenchDataset:
with compiler_gym.make("llvm-v0") as env:
ds = env.datasets["jotaibench-v0"]
yield ds
def test_jotaibench_size(jotaibench_dataset: JotaiBenchDataset):
assert jotaibench_dataset.size == 18761
def test_missing_benchmark_name(jotaibench_dataset: JotaiBenchDataset, mocker):
# Mock install() so that on CI it doesn't download and unpack the tarfile.
mocker.patch.object(jotaibench_dataset, "install")
with pytest.raises(
LookupError, match=r"^No benchmark specified: benchmark://jotaibench-v0$"
):
jotaibench_dataset.benchmark("benchmark://jotaibench-v0")
jotaibench_dataset.install.assert_called_once()
with pytest.raises(
LookupError, match=r"^No benchmark specified: benchmark://jotaibench-v0/$"
):
jotaibench_dataset.benchmark("benchmark://jotaibench-v0/")
assert jotaibench_dataset.install.call_count == 2
@skip_on_ci
@pytest.mark.parametrize("index", range(250))
def test_jotaibench_random_select(
env: LlvmEnv, jotaibench_dataset: JotaiBenchDataset, index: int, tmpwd: Path
):
uri = next(islice(jotaibench_dataset.benchmark_uris(), index, None))
benchmark = jotaibench_dataset.benchmark(uri)
env.reset(benchmark=benchmark)
assert benchmark.source
benchmark.write_sources_to_directory(tmpwd)
assert (tmpwd / "function.c").is_file()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/jotaibench_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the CLgen dataset."""
from itertools import islice
from pathlib import Path
import gym
import pytest
import compiler_gym.envs.llvm # noqa register environments
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm.datasets import CLgenDataset
from tests.pytest_plugins.common import is_ci
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.llvm"]
@pytest.fixture(scope="module")
def clgen_dataset() -> CLgenDataset:
with gym.make("llvm-v0") as env:
ds = env.datasets["benchmark://clgen-v0"]
yield ds
def test_clgen_size(clgen_dataset: CLgenDataset):
assert clgen_dataset.size == 996
def test_missing_benchmark_name(clgen_dataset: CLgenDataset, mocker):
# Mock install() so that on CI it doesn't download and unpack the tarfile.
mocker.patch.object(clgen_dataset, "install")
with pytest.raises(
LookupError, match=r"^No benchmark specified: benchmark://clgen-v0$"
):
clgen_dataset.benchmark("benchmark://clgen-v0")
clgen_dataset.install.assert_called_once()
with pytest.raises(
LookupError, match=r"^No benchmark specified: benchmark://clgen-v0/$"
):
clgen_dataset.benchmark("benchmark://clgen-v0/")
assert clgen_dataset.install.call_count == 2
@pytest.mark.parametrize("index", range(3) if is_ci() else range(250))
def test_clgen_random_select(
env: LlvmEnv, clgen_dataset: CLgenDataset, index: int, tmpwd: Path
):
uri = next(islice(clgen_dataset.benchmark_uris(), index, None))
benchmark = clgen_dataset.benchmark(uri)
env.reset(benchmark=benchmark)
assert benchmark.source
benchmark.write_sources_to_directory(tmpwd)
assert (tmpwd / "kernel.cl").is_file()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/clgen_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the Csmith dataset."""
from itertools import islice
from pathlib import Path
import gym
import numpy as np
import pytest
from flaky import flaky
import compiler_gym.envs.llvm # noqa register environments
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm.datasets import CsmithBenchmark, CsmithDataset
from compiler_gym.errors import ServiceError
from tests.pytest_plugins.common import is_ci
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.llvm"]
@pytest.fixture(scope="module")
def csmith_dataset() -> CsmithDataset:
with gym.make("llvm-v0") as env:
ds = env.datasets["generator://csmith-v0"]
yield ds
def test_csmith_size(csmith_dataset: CsmithDataset):
assert csmith_dataset.size == 0
assert len(csmith_dataset) == 0
@pytest.mark.parametrize("index", range(3) if is_ci() else range(250))
def test_csmith_random_select(
env: LlvmEnv, csmith_dataset: CsmithDataset, index: int, tmpwd: Path
):
uri = next(islice(csmith_dataset.benchmark_uris(), index, None))
benchmark = csmith_dataset.benchmark(uri)
assert isinstance(benchmark, CsmithBenchmark)
env.reset(benchmark=benchmark)
assert benchmark.source
benchmark.write_sources_to_directory(tmpwd)
assert (tmpwd / "source.c").is_file()
def test_random_benchmark(csmith_dataset: CsmithDataset):
num_benchmarks = 5
rng = np.random.default_rng(0)
random_benchmarks = {
b.uri
for b in (csmith_dataset.random_benchmark(rng) for _ in range(num_benchmarks))
}
assert len(random_benchmarks) == num_benchmarks
def test_csmith_from_seed_retry_count_exceeded(csmith_dataset: CsmithDataset):
with pytest.raises(OSError, match="Csmith failed after 5 attempts with seed 1"):
csmith_dataset.benchmark_from_seed(seed=1, max_retries=3, retry_count=5)
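# Rerun the runtime/buildtime measurement tests below if they fail with a
# ServiceError or TimeoutError.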
csmith_runtime_flaky = flaky(
max_runs=5,
rerun_filter=lambda err, *args: issubclass(err[0], ServiceError)
or isinstance(err[0], TimeoutError),
)
@csmith_runtime_flaky
def test_csmith_positive_runtimes(env: LlvmEnv, csmith_dataset: CsmithDataset):
benchmark = next(csmith_dataset.benchmarks())
env.reset(benchmark=benchmark)
val = env.observation["Runtime"]
print(val.tolist())
assert np.all(np.greater(val, 0))
@csmith_runtime_flaky
def test_csmith_positive_buildtimes(env: LlvmEnv, csmith_dataset: CsmithDataset):
benchmark = next(csmith_dataset.benchmarks())
env.reset(benchmark=benchmark)
val = env.observation["Buildtime"]
print(val.tolist())
assert np.all(np.greater(val, 0))
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/csmith_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the AnghaBench dataset."""
import sys
from itertools import islice
from pathlib import Path
import gym
import pytest
import compiler_gym.envs.llvm # noqa register environments
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm.datasets import AnghaBenchDataset
from tests.pytest_plugins.common import skip_on_ci
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.llvm"]
@pytest.fixture(scope="module")
def anghabench_dataset() -> AnghaBenchDataset:
with gym.make("llvm-v0") as env:
ds = env.datasets["anghabench-v1"]
yield ds
def test_anghabench_size(anghabench_dataset: AnghaBenchDataset):
if sys.platform == "darwin":
assert anghabench_dataset.size == 1041265
else:
assert anghabench_dataset.size == 1041333
def test_missing_benchmark_name(anghabench_dataset: AnghaBenchDataset, mocker):
# Mock install() so that on CI it doesn't download and unpack the tarfile.
mocker.patch.object(anghabench_dataset, "install")
with pytest.raises(
LookupError, match=r"^No benchmark specified: benchmark://anghabench-v1$"
):
anghabench_dataset.benchmark("benchmark://anghabench-v1")
anghabench_dataset.install.assert_called_once()
with pytest.raises(
LookupError, match=r"^No benchmark specified: benchmark://anghabench-v1/$"
):
anghabench_dataset.benchmark("benchmark://anghabench-v1/")
assert anghabench_dataset.install.call_count == 2
@skip_on_ci
@pytest.mark.parametrize("index", range(250))
def test_anghabench_random_select(
env: LlvmEnv, anghabench_dataset: AnghaBenchDataset, index: int, tmpwd: Path
):
uri = next(islice(anghabench_dataset.benchmark_uris(), index, None))
benchmark = anghabench_dataset.benchmark(uri)
env.reset(benchmark=benchmark)
assert benchmark.source
benchmark.write_sources_to_directory(tmpwd)
assert (tmpwd / "function.c").is_file()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/anghabench_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the cbench dataset."""
import tempfile
from pathlib import Path
import pytest
from compiler_gym.envs.llvm import LlvmEnv
from compiler_gym.envs.llvm.datasets import CBenchDataset, cbench
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.llvm"]
@pytest.fixture(scope="module")
def cbench_dataset() -> CBenchDataset:
with tempfile.TemporaryDirectory() as d:
yield CBenchDataset(site_data_base=Path(d))
def test_cbench_size(cbench_dataset: CBenchDataset):
assert cbench_dataset.size == 23
def test_cbench_uris(cbench_dataset: CBenchDataset):
assert list(cbench_dataset.benchmark_uris()) == [
"benchmark://cbench-v1/adpcm",
"benchmark://cbench-v1/bitcount",
"benchmark://cbench-v1/blowfish",
"benchmark://cbench-v1/bzip2",
"benchmark://cbench-v1/crc32",
"benchmark://cbench-v1/dijkstra",
"benchmark://cbench-v1/ghostscript",
"benchmark://cbench-v1/gsm",
"benchmark://cbench-v1/ispell",
"benchmark://cbench-v1/jpeg-c",
"benchmark://cbench-v1/jpeg-d",
"benchmark://cbench-v1/lame",
"benchmark://cbench-v1/patricia",
"benchmark://cbench-v1/qsort",
"benchmark://cbench-v1/rijndael",
"benchmark://cbench-v1/sha",
"benchmark://cbench-v1/stringsearch",
"benchmark://cbench-v1/stringsearch2",
"benchmark://cbench-v1/susan",
"benchmark://cbench-v1/tiff2bw",
"benchmark://cbench-v1/tiff2rgba",
"benchmark://cbench-v1/tiffdither",
"benchmark://cbench-v1/tiffmedian",
]
def test_validate_sha_output_okay():
output = cbench.BenchmarkExecutionResult(
walltime_seconds=0,
output="1234567890abcdef 1234567890abcd 1234567890abc 1234567890 12345",
)
assert cbench.validate_sha_output(output) is None
def test_validate_sha_output_invalid():
output = cbench.BenchmarkExecutionResult(walltime_seconds=0, output="abcd")
assert cbench.validate_sha_output(output)
def test_cbench_v0_deprecation(env: LlvmEnv):
"""Test that cBench-v0 emits a deprecation warning when used."""
with pytest.deprecated_call(match="Please use 'benchmark://cbench-v1'"):
env.datasets["cBench-v0"].install()
with pytest.deprecated_call(match="Please use 'benchmark://cbench-v1'"):
env.datasets.benchmark("benchmark://cBench-v0/crc32")
def test_cbench_v1_deprecation(env: LlvmEnv):
"""Test that cBench-v1 emits a deprecation warning when used."""
with pytest.deprecated_call(match="Please use 'benchmark://cbench-v1'"):
env.datasets["cBench-v1"].install()
with pytest.deprecated_call(match="Please use 'benchmark://cbench-v1'"):
env.datasets.benchmark("benchmark://cBench-v1/crc32")
def test_cbench_v1_dataset_param(env: LlvmEnv):
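    # The ?dataset=<n> URI parameter selects which of the benchmark's input
    # datasets is used; it is reflected in the benchmark's dynamic config.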
a = env.datasets.benchmark("cbench-v1/qsort?dataset=0")
b = env.datasets.benchmark("cbench-v1/qsort?dataset=0") # same as a
c = env.datasets.benchmark("cbench-v1/qsort?dataset=1")
assert a.proto.dynamic_config == b.proto.dynamic_config # sanity check
assert a.proto.dynamic_config != c.proto.dynamic_config # sanity check
def test_cbench_v1_dataset_out_of_range(env: LlvmEnv):
with pytest.raises(ValueError, match="Invalid dataset: 50"):
env.datasets.benchmark("cbench-v1/qsort?dataset=50")
with pytest.raises(ValueError, match="Invalid dataset: abc"):
env.datasets.benchmark("cbench-v1/qsort?dataset=abc")
def test_cbench_v1_init_close_test(env: LlvmEnv, benchmark_name: str):
"""Create an environment for each benchmark and close it."""
env.reset(benchmark=benchmark_name)
assert env.benchmark == benchmark_name
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/llvm/datasets/cbench_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the GCC CompilerGym service."""
import re
import gym
import numpy as np
import pytest
import compiler_gym.envs.gcc # noqa register environments
from compiler_gym.errors import ServiceError, SessionNotFound
from compiler_gym.spaces import Scalar, Sequence
from tests.pytest_plugins.common import with_docker, without_docker
from tests.pytest_plugins.gcc import docker_is_available, with_gcc_support
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.gcc"]
@without_docker
def test_gcc_env_fails_without_gcc_support():
with pytest.raises(ServiceError):
gym.make("gcc-v0")
@with_docker
def test_docker_default_action_space():
"""Test that the environment reports the service's action spaces."""
with gym.make("gcc-v0") as env:
assert env.action_spaces[0].name == "default"
assert len(env.action_spaces[0].names) == 2280
assert env.action_spaces[0].names[0] == "-O0"
@pytest.mark.xfail(
not docker_is_available(),
reason="github.com/facebookresearch/CompilerGym/issues/459",
)
def test_gcc_bin(gcc_bin: str):
"""Test that the environment reports the service's reward spaces."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
env.reset()
assert env.gcc_spec.gcc.bin == gcc_bin
@pytest.mark.xfail(
not docker_is_available(),
reason="github.com/facebookresearch/CompilerGym/issues/459",
)
def test_observation_spaces_failing_because_of_bug(gcc_bin: str):
"""Test that the environment reports the service's observation spaces."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
env.reset()
assert env.observation.spaces.keys() == {
"asm_hash",
"asm_size",
"asm",
"choices",
"command_line",
"instruction_counts",
"obj_hash",
"obj_size",
"obj",
"rtl",
"source",
}
assert env.observation.spaces["obj_size"].space == Scalar(
name="obj_size", min=-1, max=np.iinfo(np.int64).max, dtype=int
)
assert env.observation.spaces["asm"].space == Sequence(
name="asm", size_range=(0, np.iinfo(np.int64).max), dtype=str
)
def test_reward_spaces(gcc_bin: str):
"""Test that the environment reports the service's reward spaces."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
env.reset()
assert env.reward.spaces.keys() == {"asm_size", "obj_size"}
@with_gcc_support
def test_step_before_reset(gcc_bin: str):
"""Taking a step() before reset() is illegal."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
with pytest.raises(
SessionNotFound, match=r"Must call reset\(\) before step\(\)"
):
env.step(0)
@with_gcc_support
def test_observation_before_reset(gcc_bin: str):
"""Taking an observation before reset() is illegal."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
with pytest.raises(
SessionNotFound, match=r"Must call reset\(\) before step\(\)"
):
_ = env.observation["asm"]
@with_gcc_support
def test_reward_before_reset(gcc_bin: str):
"""Taking a reward before reset() is illegal."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
with pytest.raises(
SessionNotFound, match=r"Must call reset\(\) before step\(\)"
):
_ = env.reward["obj_size"]
@with_gcc_support
def test_reset_invalid_benchmark(gcc_bin: str):
"""Test requesting a specific benchmark."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
with pytest.raises(
LookupError, match=r"Dataset not found: benchmark://chstone-v1"
):
env.reset(benchmark="chstone-v1/flubbedydubfishface")
@with_gcc_support
def test_invalid_observation_space(gcc_bin: str):
"""Test error handling with invalid observation space."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
with pytest.raises(LookupError):
env.observation_space = 100
@with_gcc_support
def test_invalid_reward_space(gcc_bin: str):
"""Test error handling with invalid reward space."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
with pytest.raises(LookupError):
env.reward_space = 100
@with_gcc_support
def test_double_reset(gcc_bin: str):
"""Test that reset() can be called twice."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
env.reset()
assert env.in_episode
env.step(env.action_space.sample())
env.reset()
_, _, done, info = env.step(env.action_space.sample())
assert not done, info
assert env.in_episode
@with_gcc_support
def test_step_out_of_range(gcc_bin: str):
"""Test error handling with an invalid action."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
env.reset()
with pytest.raises(ValueError, match="Out-of-range"):
env.step(10000)
@with_gcc_support
def test_default_benchmark(gcc_bin: str):
"""Test that we are working with the expected default benchmark."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
assert env.benchmark.proto.uri == "benchmark://chstone-v0/adpcm"
@with_gcc_support
def test_default_reward(gcc_bin: str):
"""Test default reward space."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
env.reward_space = "obj_size"
env.reset()
observation, reward, done, info = env.step(0)
assert observation is None
assert reward == 0
assert not done, info
@with_gcc_support
def test_source_observation(gcc_bin: str):
"""Test observation spaces."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
env.reset()
lines = env.source.split("\n")
assert re.match(r"# \d+ \"adpcm.c\"", lines[0])
@with_docker
def test_rtl_observation():
"""Test observation spaces."""
with gym.make("gcc-v0") as env:
env.reset()
assert env.rtl.startswith(
"""
;; Function abs (abs, funcdef_no=0, decl_uid=1084, cgraph_uid=1, symbol_order=90)
(note 1 0 4 NOTE_INSN_DELETED)
(note 4 1 38 2 [bb 2] NOTE_INSN_BASIC_BLOCK)"""
)
@with_docker
def test_asm_observation():
"""Test observation spaces."""
with gym.make("gcc-v0") as env:
env.reset()
assert env.asm.startswith('\t.file\t"src.c"\n\t')
@with_docker
def test_asm_size_observation():
"""Test observation spaces."""
with gym.make("gcc-v0") as env:
env.reset()
assert env.asm_size == 39876
@with_docker
def test_asm_hash_observation():
"""Test observation spaces."""
with gym.make("gcc-v0") as env:
env.reset()
assert env.asm_hash == "f4921de395b026a55eab3844c8fe43dd"
@with_docker
def test_instruction_counts_observation():
"""Test observation spaces."""
with gym.make("gcc-v0") as env:
env.reset()
assert env.instruction_counts == {
".align": 95,
".bss": 8,
".cfi": 91,
".file": 1,
".globl": 110,
".ident": 1,
".long": 502,
".section": 10,
".size": 110,
".string": 1,
".text": 4,
".type": 110,
".zero": 83,
"addl": 44,
"addq": 17,
"andl": 2,
"call": 34,
"cltq": 67,
"cmovns": 2,
"cmpl": 30,
"cmpq": 1,
"imulq": 27,
"je": 2,
"jge": 3,
"jle": 21,
"jmp": 24,
"jne": 1,
"jns": 2,
"js": 7,
"leaq": 40,
"leave": 4,
"movl": 575,
"movq": 150,
"movslq": 31,
"negl": 5,
"negq": 1,
"nop": 7,
"orl": 1,
"popq": 11,
"pushq": 16,
"ret": 15,
"sall": 2,
"salq": 7,
"sarl": 9,
"sarq": 20,
"shrl": 2,
"subl": 7,
"subq": 15,
"testl": 1,
"testq": 4,
}
@with_docker
def test_obj_observation():
"""Test observation spaces."""
with gym.make("gcc-v0") as env:
env.reset()
assert env.obj[:5].tobytes() == b"\x7fELF\x02"
@with_docker
def test_obj_size_observation():
"""Test observation spaces."""
with gym.make("gcc-v0") as env:
env.reset()
assert env.obj_size == 21192
@with_docker
def test_obj_hash_observation():
"""Test observation spaces."""
with gym.make("gcc-v0") as env:
env.reset()
assert env.obj_hash == "65937217c3758faf655df98741fe1d52"
@with_docker
def test_choices_observation():
"""Test observation spaces."""
with gym.make("gcc-v0") as env:
env.reset()
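        # A freshly reset environment has made no choices yet: every option is -1.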
choices = env.choices
assert len(choices) == 502
assert all(map(lambda x: x == -1, choices))
@with_docker
def test_action_space_string():
"""Test observation spaces."""
with gym.make("gcc-v0") as env:
env.reset()
assert (
env.action_space.to_string(env.actions)
== "docker:gcc:11.2.0 -w -c src.c -o obj.o"
)
@with_docker
def test_gcc_spec():
"""Test gcc_spec param."""
with gym.make("gcc-v0") as env:
env.reset()
assert env.gcc_spec.gcc.bin == "docker:gcc:11.2.0"
assert min(map(len, env.gcc_spec.options)) > 0
@with_docker
def test_set_choices():
"""Test that we can set the command line parameters"""
with gym.make("gcc-v0") as env:
env.reset()
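        # choices has one entry per gcc_spec option: -1 leaves an option unset,
        # while 0 selects the option's first value.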
env.choices = [-1] * len(env.gcc_spec.options)
assert env.action_space.to_string(env.actions).startswith(
"docker:gcc:11.2.0 -w -c src.c -o obj.o"
)
env.choices = [0] * len(env.gcc_spec.options)
assert env.action_space.to_string(env.actions).startswith(
"docker:gcc:11.2.0 -O0 -faggressive-loop-optimizations -falign-functions -falign-jumps -falign-labels"
)
@with_docker
def test_rewards():
"""Test reward spaces."""
with gym.make("gcc-v0") as env:
env.reset()
assert env.reward["asm_size"] == 0
assert env.reward["obj_size"] == 0
env.step(env.action_space.names.index("-O3"))
assert env.reward["asm_size"] == -19235.0
assert env.reward["obj_size"] == -6520.0
@with_gcc_support
def test_timeout(gcc_bin: str):
"""Test that the timeout can be set."""
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
env.reset()
env.timeout = 20
assert env.timeout == 20
env.reset()
assert env.timeout == 20
@with_docker
def test_compile():
with gym.make("gcc-v0") as env:
env.observation_space = "obj_size"
observation = env.reset()
assert observation == 21192
observation, _, _, _ = env.step(env.action_space.names.index("-O0"))
assert observation == 21192
observation, _, _, _ = env.step(env.action_space.names.index("-O3"))
assert observation == 27712
observation, _, _, _ = env.step(env.action_space.names.index("-finline"))
assert observation == 27712
@with_gcc_support
def test_fork(gcc_bin: str):
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
env.reset()
env.step(0)
env.step(1)
fkd = env.fork()
try:
assert env.benchmark == fkd.benchmark
assert fkd.actions == [0, 1]
fkd.step(0)
assert fkd.actions == [0, 1, 0]
assert env.actions == [0, 1]
finally:
fkd.close()
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/gcc/gcc_env_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the GCC CompilerGym service."""
import gym
import pytest
import compiler_gym.envs.gcc # noqa register environments
from compiler_gym.errors import ServiceError
from tests.pytest_plugins.common import skip_on_ci, with_docker
from tests.test_main import main
@with_docker
def test_invalid_docker_image():
with pytest.raises(ServiceError):
gym.make("gcc-v0", gcc_bin="docker:not-a-valid-image")
@with_docker
def test_version_11():
with gym.make("gcc-v0", gcc_bin="docker:gcc:11.2.0") as env:
assert env.compiler_version == "gcc (GCC) 11.2.0"
@skip_on_ci
@with_docker
def test_version_10():
with gym.make("gcc-v0", gcc_bin="docker:gcc:10.3.0") as env:
assert env.compiler_version == "gcc (GCC) 10.3.0"
with gym.make("gcc-v0", gcc_bin="docker:gcc:10.3") as env:
assert env.compiler_version == "gcc (GCC) 10.3.0"
with gym.make("gcc-v0", gcc_bin="docker:gcc:10") as env:
assert env.compiler_version == "gcc (GCC) 10.3.0"
@skip_on_ci
@with_docker
def test_version_9():
with gym.make("gcc-v0", gcc_bin="docker:gcc:9.4.0") as env:
assert env.compiler_version == "gcc (GCC) 9.4.0"
with gym.make("gcc-v0", gcc_bin="docker:gcc:9.4") as env:
assert env.compiler_version == "gcc (GCC) 9.4.0"
with gym.make("gcc-v0", gcc_bin="docker:gcc:9") as env:
assert env.compiler_version == "gcc (GCC) 9.4.0"
@skip_on_ci
@with_docker
def test_version_8():
with gym.make("gcc-v0", gcc_bin="docker:gcc:8.5.0") as env:
assert env.compiler_version == "gcc (GCC) 8.5.0"
with gym.make("gcc-v0", gcc_bin="docker:gcc:8.5") as env:
assert env.compiler_version == "gcc (GCC) 8.5.0"
with gym.make("gcc-v0", gcc_bin="docker:gcc:8") as env:
assert env.compiler_version == "gcc (GCC) 8.5.0"
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/gcc/gcc_docker_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/gcc/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the GCC CompilerGym service."""
import gym
import pytest
import compiler_gym.envs.gcc # noqa register environments
from compiler_gym.errors import ServiceError
from tests.pytest_plugins.gcc import with_system_gcc, without_system_gcc
from tests.test_main import main
def test_missing_gcc_bin():
with pytest.raises(ServiceError):
gym.make("gcc-v0", gcc_bin="not-a-real-file")
def test_invalid_gcc_bin():
with pytest.raises(ServiceError):
gym.make("gcc-v0", gcc_bin="false")
@with_system_gcc
def test_system_gcc():
with gym.make("gcc-v0", gcc_bin="gcc") as env:
assert "gcc" in env.compiler_version
@without_system_gcc
def test_missing_system_gcc():
with pytest.raises(ServiceError):
gym.make("gcc-v0", gcc_bin="gcc")
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/gcc/gcc_bin_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/gcc/datasets/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the Csmith dataset."""
from itertools import islice
from pathlib import Path
import gym
import numpy as np
import pytest
from compiler_gym.envs.gcc.datasets import CsmithBenchmark
from tests.pytest_plugins.common import is_ci
from tests.pytest_plugins.gcc import with_gcc_support
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.gcc"]
@pytest.mark.xfail(
reason="github.com/facebookresearch/CompilerGym/issues/459",
)
@with_gcc_support
def test_csmith_size(gcc_bin: str):
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
csmith_dataset = env.datasets["generator://csmith-v0"]
assert csmith_dataset.size == 0
assert len(csmith_dataset) == 0
@pytest.mark.xfail(
reason="github.com/facebookresearch/CompilerGym/issues/459",
)
@with_gcc_support
@pytest.mark.parametrize("index", range(3) if is_ci() else range(10))
def test_csmith_random_select(gcc_bin: str, index: int, tmpwd: Path):
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
csmith_dataset = env.datasets["generator://csmith-v0"]
uri = next(islice(csmith_dataset.benchmark_uris(), index, None))
benchmark = csmith_dataset.benchmark(uri)
assert isinstance(benchmark, CsmithBenchmark)
env.reset(benchmark=benchmark)
assert benchmark.source
benchmark.write_sources_to_directory(tmpwd)
assert (tmpwd / "source.c").is_file()
@pytest.mark.xfail(
reason="github.com/facebookresearch/CompilerGym/issues/459",
)
@with_gcc_support
def test_random_benchmark(gcc_bin: str):
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
csmith_dataset = env.datasets["generator://csmith-v0"]
num_benchmarks = 5
rng = np.random.default_rng(0)
random_benchmarks = {
b.uri
for b in (
csmith_dataset.random_benchmark(rng) for _ in range(num_benchmarks)
)
}
assert len(random_benchmarks) == num_benchmarks
@pytest.mark.xfail(
reason="github.com/facebookresearch/CompilerGym/issues/459",
)
@with_gcc_support
def test_csmith_from_seed_retry_count_exceeded(gcc_bin: str):
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
csmith_dataset = env.datasets["generator://csmith-v0"]
with pytest.raises(OSError, match="Csmith failed after 5 attempts with seed 1"):
csmith_dataset.benchmark_from_seed(seed=1, max_retries=3, retry_count=5)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/gcc/datasets/csmith_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for the AnghaBench dataset."""
import sys
from itertools import islice
import gym
import pytest
import compiler_gym.envs.gcc # noqa register environments
from tests.pytest_plugins.common import skip_on_ci
from tests.pytest_plugins.gcc import with_gcc_support
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.gcc"]
@with_gcc_support
def test_anghabench_size(gcc_bin: str):
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
anghabench_dataset = env.datasets["anghabench-v1"]
if sys.platform == "darwin":
assert anghabench_dataset.size == 1041265
else:
assert anghabench_dataset.size == 1041333
@with_gcc_support
def test_missing_benchmark_name(gcc_bin: str, mocker):
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
anghabench_dataset = env.datasets["anghabench-v1"]
# Mock install() so that on CI it doesn't download and unpack the tarfile.
mocker.patch.object(anghabench_dataset, "install")
with pytest.raises(
LookupError, match=r"^Benchmark not found: benchmark://anghabench-v1"
):
anghabench_dataset.benchmark("benchmark://anghabench-v1")
anghabench_dataset.install.assert_called_once()
with pytest.raises(
LookupError, match=r"^Benchmark not found: benchmark://anghabench-v1/"
):
anghabench_dataset.benchmark("benchmark://anghabench-v1/")
assert anghabench_dataset.install.call_count == 2
@with_gcc_support
@skip_on_ci
@pytest.mark.parametrize("index", range(10))
def test_anghabench_random_select(gcc_bin: str, index: int):
with gym.make("gcc-v0", gcc_bin=gcc_bin) as env:
anghabench_dataset = env.datasets["anghabench-v1"]
uri = next(islice(anghabench_dataset.benchmark_uris(), index, None))
benchmark = anghabench_dataset.benchmark(uri)
env.reset(benchmark=benchmark)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/gcc/datasets/anghabench_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Tests for //compiler_gym/leaderboard:llvm_instcount."""
from pathlib import Path
import pytest
from absl import flags
from compiler_gym.leaderboard.llvm_instcount import eval_llvm_instcount_policy
from tests.pytest_plugins.common import set_command_line_flags
from tests.test_main import main
FLAGS = flags.FLAGS
pytest_plugins = ["tests.pytest_plugins.common"]
def null_policy(env) -> None:
"""A policy that does nothing."""
pass
def test_eval_llvm_instcount_policy():
set_command_line_flags(["argv0", "--n=1", "--max_benchmarks=1", "--novalidate"])
with pytest.raises(SystemExit):
eval_llvm_instcount_policy(null_policy)
def test_eval_llvm_instcount_policy_resume(tmpwd):
# Run eval on a single benchmark.
set_command_line_flags(
[
"argv0",
"--n=1",
"--max_benchmarks=1",
"--novalidate",
"--resume",
"--leaderboard_results=test.csv",
]
)
with pytest.raises(SystemExit):
eval_llvm_instcount_policy(null_policy)
# Check that the log has a single entry (and a header row.)
assert Path("test.csv").is_file()
with open("test.csv") as f:
log = f.read()
assert len(log.rstrip().split("\n")) == 2
init_logfile = log
# Repeat, but for two benchmarks.
set_command_line_flags(
[
"argv0",
"--n=1",
"--max_benchmarks=2",
"--novalidate",
"--resume",
"--leaderboard_results=test.csv",
]
)
with pytest.raises(SystemExit):
eval_llvm_instcount_policy(null_policy)
# Check that the log extends the original.
assert Path("test.csv").is_file()
with open("test.csv") as f:
log = f.read()
assert log.startswith(init_logfile)
assert len(log.rstrip().split("\n")) == 3
init_logfile = log
# Repeat, but for two runs of each benchmark.
set_command_line_flags(
[
"argv0",
"--n=2",
"--max_benchmarks=2",
"--novalidate",
"--resume",
"--leaderboard_results=test.csv",
]
)
with pytest.raises(SystemExit):
eval_llvm_instcount_policy(null_policy)
# Check that the log extends the original.
assert Path("test.csv").is_file()
with open("test.csv") as f:
log = f.read()
assert log.startswith(init_logfile)
assert len(log.rstrip().split("\n")) == 5
def test_eval_llvm_instcount_policy_invalid_flag():
set_command_line_flags(["argv0", "--n=-1"])
with pytest.raises(AssertionError):
eval_llvm_instcount_policy(null_policy)
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/leaderboard/llvm_instcount_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/leaderboard/__init__.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Integration tests for the MLIR CompilerGym environments."""
from numbers import Real
import gym
import numpy as np
import pytest
import compiler_gym
from compiler_gym.envs import CompilerEnv, mlir
from compiler_gym.envs.mlir import MlirEnv
from compiler_gym.service.connection import CompilerGymServiceConnection
from compiler_gym.spaces import (
ActionSpace,
Box,
Dict,
Discrete,
NamedDiscrete,
Permutation,
Scalar,
SpaceSequence,
)
from compiler_gym.spaces import Tuple as TupleSpace
from compiler_gym.wrappers.mlir import convert_action, make_mlir_rl_wrapper_env
from tests.test_main import main
pytest_plugins = ["tests.pytest_plugins.common", "tests.pytest_plugins.mlir"]
@pytest.fixture(scope="function", params=["local", "service"])
def env(request) -> CompilerEnv:
"""Create an MLIR environment."""
if request.param == "local":
with gym.make("mlir-v0") as env:
yield env
else:
service = CompilerGymServiceConnection(mlir.MLIR_SERVICE_BINARY)
try:
with MlirEnv(service=service.connection.url) as env:
yield env
finally:
service.close()
def test_service_version(env: MlirEnv):
assert env.version == compiler_gym.__version__
def test_compiler_version(env: MlirEnv):
assert env.compiler_version.startswith("LLVM 14.")
def test_action_spaces_names(env: MlirEnv):
assert {a.name for a in env.action_spaces} == {"MatrixMultiplication"}
def test_action_space(env: MlirEnv):
expected_action_space = ActionSpace(
SpaceSequence(
name="MatrixMultiplication",
size_range=[1, 4],
space=Dict(
name=None,
spaces={
"tile_options": Dict(
name=None,
spaces={
"interchange_vector": Permutation(
name=None,
scalar_range=Scalar(name=None, min=0, max=2, dtype=int),
),
"tile_sizes": Box(
name=None,
low=np.array([1] * 3, dtype=int),
high=np.array([2**32] * 3, dtype=int),
dtype=np.int64,
),
"promote": Scalar(
name=None, min=False, max=True, dtype=bool
),
"promote_full_tile": Scalar(
name=None, min=False, max=True, dtype=bool
),
"loop_type": NamedDiscrete(
name=None,
items=["loops", "affine_loops"],
),
},
),
"vectorize_options": Dict(
name=None,
spaces={
"vectorize_to": NamedDiscrete(
name=None,
items=["dot", "matmul", "outer_product"],
),
"vector_transfer_split": NamedDiscrete(
name=None,
items=["none", "linalg_copy", "vector_transfer"],
),
"unroll_vector_transfers": Scalar(
name=None,
min=False,
max=True,
dtype=bool,
),
},
),
},
),
)
)
assert expected_action_space == env.action_space
def test_set_observation_space_from_spec(env: MlirEnv):
env.observation_space = env.observation.spaces["Runtime"]
obs = env.observation_space
env.observation_space = "Runtime"
assert env.observation_space == obs
def test_set_reward_space_from_spec(env: MlirEnv):
env.reward_space = env.reward.spaces["runtime"]
reward = env.reward_space
env.reward_space = "runtime"
assert env.reward_space == reward
def test_mlir_rl_wrapper_env_action_space(env: MlirEnv):
wrapper_env = make_mlir_rl_wrapper_env(env)
action_space = wrapper_env.action_space
tile_size = NamedDiscrete(
name=None,
items=["1", "2", "4", "8", "16", "32", "64", "128", "256", "512", "1024"],
)
expected_subspace = Dict(
name=None,
spaces={
"tile_options": Dict(
name=None,
spaces={
"interchange_vector": Discrete(name=None, n=6),
"tile_sizes": TupleSpace(
name=None, spaces=[tile_size, tile_size, tile_size]
),
"promote": NamedDiscrete(name=None, items=["False", "True"]),
"promote_full_tile": NamedDiscrete(
name=None, items=["False", "True"]
),
"loop_type": NamedDiscrete(
name=None,
items=["loops", "affine_loops"],
),
},
),
"vectorize_options": Dict(
name=None,
spaces={
"vectorize_to": NamedDiscrete(
name=None, items=["dot", "matmul", "outer_product"]
),
"vector_transfer_split": NamedDiscrete(
name=None,
items=["none", "linalg_copy", "vector_transfer"],
),
"unroll_vector_transfers": NamedDiscrete(
name=None, items=["False", "True"]
),
},
),
},
)
assert action_space[0] == expected_subspace
for i in range(1, 4):
assert action_space[i]["is_present"] == NamedDiscrete(
name=None, items=["False", "True"]
)
assert action_space[i]["space"] == expected_subspace
def test_convert_action():
action = [
{
"tile_options": {
"interchange_vector": 5,
"tile_sizes": [1, 3, 9],
"promote": 1,
"promote_full_tile": 0,
"loop_type": 1,
},
"vectorize_options": {
"vectorize_to": 2,
"vector_transfer_split": 1,
"unroll_vector_transfers": 1,
},
},
{"is_present": 0},
]
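    # convert_action() maps discrete indices to concrete values: a tile-size index
    # i becomes 2**i, the interchange index expands to a permutation, and entries
    # whose is_present flag is 0 are dropped.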
converted_action = convert_action(action)
expected_action = [
{
"tile_options": {
"interchange_vector": np.array([2, 1, 0], dtype=int),
"tile_sizes": [2, 8, 512],
"promote": True,
"promote_full_tile": False,
"loop_type": 1,
},
"vectorize_options": {
"vectorize_to": 2,
"vector_transfer_split": 1,
"unroll_vector_transfers": True,
},
}
]
assert len(converted_action) == len(expected_action)
assert len(converted_action[0]) == len(expected_action[0])
assert len(converted_action[0]["tile_options"]) == len(
expected_action[0]["tile_options"]
)
assert len(converted_action[0]["vectorize_options"]) == len(
expected_action[0]["vectorize_options"]
)
def test_mlir_rl_wrapper_env_observation_space(env: MlirEnv):
wrapper_env = make_mlir_rl_wrapper_env(env)
observation_space = wrapper_env.observation_space
assert observation_space == Box(
name="Runtime", shape=[1], low=0, high=np.inf, dtype=float
)
def test_mlir_rl_wrapper_env_step(env: MlirEnv):
wrapper_env = make_mlir_rl_wrapper_env(env)
action_space = wrapper_env.action_space
action_space.seed(123)
action = action_space.sample()
print(action)
observation, reward, done, _ = wrapper_env.step(action)
assert isinstance(observation, np.ndarray)
assert np.array_equal(observation.shape, [1])
assert observation[0] > 0
assert isinstance(reward, Real)
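    # The runtime reward is the negated runtime observation.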
assert observation[0] == -reward
assert isinstance(done, bool)
assert done
def test_mlir_rl_wrapper_env_reset(env: MlirEnv):
wrapper_env = make_mlir_rl_wrapper_env(env)
action_space = wrapper_env.action_space
action_space.seed(123)
observation = wrapper_env.reset()
assert isinstance(observation, np.ndarray)
assert np.array_equal(observation.shape, [1])
assert observation[0] == 0
if __name__ == "__main__":
main()
|
CompilerGym-development
|
tests/mlir/mlir_env_test.py
|
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the LICENSE file
# in the root directory of this source tree.
#
# tests/**/__init__.py files are needed for pytest Python path resolution. See:
# https://docs.pytest.org/en/latest/explanation/pythonpath.html
|
CompilerGym-development
|
tests/mlir/__init__.py
|