python_code (string, 0-187k chars) | repo_name (string, 8-46 chars) | file_path (string, 6-135 chars) |
---|---|---|
import pytest
from allennlp.data.dataset_readers import SequenceTaggingDatasetReader
from allennlp.common.util import ensure_list
from allennlp.common.testing import AllenNlpTestCase
class TestSequenceTaggingDatasetReader:
@pytest.mark.parametrize("lazy", (True, False))
def test_default_format(self, lazy):
reader = SequenceTaggingDatasetReader(lazy=lazy)
instances = reader.read(AllenNlpTestCase.FIXTURES_ROOT / "data" / "sequence_tagging.tsv")
instances = ensure_list(instances)
assert len(instances) == 4
fields = instances[0].fields
assert [t.text for t in fields["tokens"].tokens] == ["cats", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
fields = instances[1].fields
assert [t.text for t in fields["tokens"].tokens] == ["dogs", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
fields = instances[2].fields
assert [t.text for t in fields["tokens"].tokens] == ["snakes", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
fields = instances[3].fields
assert [t.text for t in fields["tokens"].tokens] == ["birds", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
def test_brown_corpus_format(self):
reader = SequenceTaggingDatasetReader(word_tag_delimiter="/")
instances = reader.read(AllenNlpTestCase.FIXTURES_ROOT / "data" / "brown_corpus.txt")
instances = ensure_list(instances)
assert len(instances) == 4
fields = instances[0].fields
assert [t.text for t in fields["tokens"].tokens] == ["cats", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
fields = instances[1].fields
assert [t.text for t in fields["tokens"].tokens] == ["dogs", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
fields = instances[2].fields
assert [t.text for t in fields["tokens"].tokens] == ["snakes", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
fields = instances[3].fields
assert [t.text for t in fields["tokens"].tokens] == ["birds", "are", "animals", "."]
assert fields["tags"].labels == ["N", "V", "N", "N"]
| allennlp-master | tests/data/dataset_readers/sequence_tagging_test.py |
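The two tests above drive the same reader with different word/tag delimiters. Below is a minimal usage sketch; the inline description of the file format reflects the reader's documented default ("###" between word and tag, whitespace between pairs) and should be treated as an assumption rather than the literal fixture contents.

```python
# Illustrative sketch, not the test fixture. The "###" word/tag delimiter is the
# reader's documented default; the exact file layout shown here is an assumption.
from allennlp.data.dataset_readers import SequenceTaggingDatasetReader

# Default format: one sentence per line, e.g.
#   cats###N	are###V	animals###N	.###N
reader = SequenceTaggingDatasetReader()

# Brown-corpus-style format: word/tag pairs joined by "/", e.g.
#   cats/N are/V animals/N ./N
brown_reader = SequenceTaggingDatasetReader(word_tag_delimiter="/")

for instance in reader.read("path/to/sequence_tagging.tsv"):
    tokens = [t.text for t in instance.fields["tokens"].tokens]
    tags = instance.fields["tags"].labels
```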
from typing import Iterable, List
from allennlp.data.fields import TextField
from allennlp.data.instance import Instance
from allennlp.data.dataset_readers import DatasetReader
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers import Token
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common.util import ensure_list
class LazyDatasetReader(DatasetReader):
def __init__(self, instances: List[Instance], lazy: bool) -> None:
super().__init__()
self.lazy = lazy
self._instances = instances
self.num_reads = 0
def _read(self, _: str) -> Iterable[Instance]:
self.num_reads += 1
return (instance for instance in self._instances)
class TestLazyDatasetReader(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
token_indexer = {"tokens": SingleIdTokenIndexer()}
field1 = TextField([Token(t) for t in ["this", "is", "a", "sentence", "."]], token_indexer)
field2 = TextField(
[Token(t) for t in ["this", "is", "a", "different", "sentence", "."]], token_indexer
)
field3 = TextField([Token(t) for t in ["here", "is", "a", "sentence", "."]], token_indexer)
field4 = TextField([Token(t) for t in ["this", "is", "short"]], token_indexer)
self.instances = [
Instance({"text1": field1, "text2": field2}),
Instance({"text1": field3, "text2": field4}),
]
def test_lazy(self):
reader = LazyDatasetReader(self.instances, lazy=True)
assert reader.num_reads == 0
instances = reader.read("path/to/file")
for _ in range(10):
_instances = (i for i in instances)
assert ensure_list(_instances) == self.instances
assert reader.num_reads == 10
def test_non_lazy(self):
reader = LazyDatasetReader(self.instances, lazy=False)
assert reader.num_reads == 0
instances = reader.read("path/to/file")
for _ in range(10):
_instances = (i for i in instances)
assert ensure_list(_instances) == self.instances
assert reader.num_reads == 1
| allennlp-master | tests/data/dataset_readers/lazy_dataset_reader_test.py |
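The read counts asserted above (10 when lazy, 1 when not) follow from the `DatasetReader` contract: with `lazy=True`, `read()` returns a dataset that calls `_read()` again on every iteration, while `lazy=False` materializes the instances once and reuses them. A minimal sketch of a conforming reader (class and path names are illustrative):

```python
from typing import Iterable

from allennlp.data.dataset_readers import DatasetReader
from allennlp.data.fields import LabelField
from allennlp.data.instance import Instance


class CountingReader(DatasetReader):
    """Illustrative reader; _read must be a generator for lazy=True to work."""

    def _read(self, file_path: str) -> Iterable[Instance]:
        for i in range(3):
            yield self.text_to_instance(i)

    def text_to_instance(self, index: int) -> Instance:  # type: ignore
        return Instance({"index": LabelField(index, skip_indexing=True)})


dataset = CountingReader(lazy=True).read("ignored-path")
for _ in dataset:  # first pass calls _read()
    pass
for _ in dataset:  # second pass calls _read() again because the reader is lazy
    pass
```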
from collections import deque
import os
import shutil
from typing import Optional, NamedTuple, List
from filelock import FileLock
import pytest
import torch.distributed as dist
from allennlp.common.testing import AllenNlpTestCase
from allennlp.common import util as common_util
from allennlp.common.checks import ConfigurationError
from allennlp.data import Instance
from allennlp.data.dataloader import PyTorchDataLoader
from allennlp.data.dataset_readers import (
dataset_reader,
DatasetReader,
TextClassificationJsonReader,
)
from allennlp.data.dataset_readers.dataset_reader import AllennlpLazyDataset
from allennlp.data.fields import LabelField
def mock_collate_fn(item):
return item[0]
class TestDatasetReader(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.cache_directory = str(AllenNlpTestCase.FIXTURES_ROOT / "data_cache" / "with_prefix")
def teardown_method(self):
super().teardown_method()
if os.path.exists(self.cache_directory):
shutil.rmtree(self.cache_directory)
def test_lazy_dataset_can_be_iterated_through_multiple_times(self):
data_file = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "imdb_corpus.jsonl"
)
reader = TextClassificationJsonReader(lazy=True)
instances = reader.read(data_file)
assert isinstance(instances, AllennlpLazyDataset)
first_pass_instances = list(instances)
assert len(first_pass_instances) > 2
second_pass_instances = list(instances)
assert first_pass_instances == second_pass_instances
def test_read_only_creates_cache_file_once(self):
data_file = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "imdb_corpus.jsonl"
)
reader = TextClassificationJsonReader(cache_directory=self.cache_directory)
cache_file = reader._get_cache_location_for_file_path(str(data_file))
# The first read will create the cache.
reader.read(data_file)
assert os.path.exists(cache_file)
with open(cache_file, "r") as in_file:
cache_contents = in_file.read()
# The second and all subsequent reads should _use_ the cache, not modify it. I looked
# into checking file modification times, but this test will probably be faster than the
# granularity of `os.path.getmtime()` (which only returns values in seconds).
reader.read(data_file)
reader.read(data_file)
reader.read(data_file)
reader.read(data_file)
with open(cache_file, "r") as in_file:
final_cache_contents = in_file.read()
assert cache_contents == final_cache_contents
@pytest.mark.parametrize("lazy", (True, False))
def test_caching_works_with_lazy_reading(self, caplog, lazy: bool):
data_file = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "imdb_corpus.jsonl"
)
snli_copy_file = str(data_file) + ".copy"
shutil.copyfile(data_file, snli_copy_file)
reader = TextClassificationJsonReader(lazy=lazy, cache_directory=self.cache_directory)
cache_file = reader._get_cache_location_for_file_path(snli_copy_file)
# The call to read() will give us an _iterator_. We'll iterate over it multiple times,
# and the caching behavior should change as we go.
assert not os.path.exists(cache_file)
instances = reader.read(snli_copy_file)
# The first iteration will create the cache
first_pass_instances = []
for instance in instances:
first_pass_instances.append(instance)
assert "Caching instances to temp file" in " ".join([rec.message for rec in caplog.records])
assert os.path.exists(cache_file)
# Now we _remove_ the data file, to be sure we're reading from the cache.
os.remove(snli_copy_file)
caplog.clear()
instances = reader.read(snli_copy_file)
second_pass_instances = []
for instance in instances:
second_pass_instances.append(instance)
assert "Reading instances from cache" in " ".join([rec.message for rec in caplog.records])
# We should get the same instances both times.
assert len(first_pass_instances) == len(second_pass_instances)
for instance, cached_instance in zip(first_pass_instances, second_pass_instances):
assert instance.fields == cached_instance.fields
# And just to be super paranoid, in case the second pass somehow bypassed the cache
# because of a bug that's hard to detect, we'll read the
# instances from the cache with a non-lazy iterator and make sure they're the same.
reader = TextClassificationJsonReader(lazy=False, cache_directory=self.cache_directory)
cached_instances = reader.read(snli_copy_file)
assert len(first_pass_instances) == len(cached_instances)
for instance, cached_instance in zip(first_pass_instances, cached_instances):
assert instance.fields == cached_instance.fields
@pytest.mark.parametrize("lazy", (True, False))
def test_caching_skipped_when_lock_not_acquired(self, caplog, lazy: bool):
data_file = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "imdb_corpus.jsonl"
)
reader = TextClassificationJsonReader(lazy=lazy, cache_directory=self.cache_directory)
reader.CACHE_FILE_LOCK_TIMEOUT = 1
cache_file = reader._get_cache_location_for_file_path(str(data_file))
with FileLock(cache_file + ".lock"):
# Right now we hold the lock on the cache, so the reader shouldn't
# be able to write to it. It will wait for 1 second (because that's what
# we set the timeout to be), and then just read the instances as normal.
caplog.clear()
instances = list(reader.read(data_file))
assert "Failed to acquire lock" in caplog.text
assert instances
# We didn't write to the cache because we couldn't acquire the file lock.
assert not os.path.exists(cache_file)
# Now we'll write to the cache and then try the same thing again, this
# time making sure that we can still successfully read without the cache
# when the lock can't be acquired.
deque(reader.read(data_file), maxlen=1)
assert os.path.exists(cache_file)
with FileLock(cache_file + ".lock"):
# Right now we hold the lock on the cache, so the reader shouldn't
# be able to write to it. It will wait for 1 second (because that's what
# we set the timeout to be), and then just read the instances as normal.
caplog.clear()
instances = list(reader.read(data_file))
assert "Failed to acquire lock" in caplog.text
assert instances
@pytest.mark.parametrize("lazy", (True, False))
def test_caching_skipped_with_distributed_training(self, caplog, monkeypatch, lazy):
monkeypatch.setattr(common_util, "is_distributed", lambda: True)
monkeypatch.setattr(dist, "get_rank", lambda: 0)
monkeypatch.setattr(dist, "get_world_size", lambda: 1)
data_file = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "imdb_corpus.jsonl"
)
reader = TextClassificationJsonReader(lazy=lazy, cache_directory=self.cache_directory)
cache_file = reader._get_cache_location_for_file_path(str(data_file))
deque(reader.read(data_file), maxlen=1)
assert not os.path.exists(cache_file)
assert "Can't cache data instances when there are multiple processes" in caplog.text
def test_caching_with_lazy_reader_in_multi_process_loader(self):
data_file = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "imdb_corpus.jsonl"
)
reader = TextClassificationJsonReader(lazy=True, cache_directory=self.cache_directory)
deque(
PyTorchDataLoader(reader.read(data_file), collate_fn=mock_collate_fn, num_workers=2),
maxlen=0,
)
# We shouldn't write to the cache when the data is being loaded from multiple
# processes.
cache_file = reader._get_cache_location_for_file_path(str(data_file))
assert not os.path.exists(cache_file)
# But try again from the main process and we should see the cache file.
instances = list(reader.read(data_file))
assert instances
assert os.path.exists(cache_file)
# Reading again from a multi-process loader should read from the cache.
new_instances = list(
PyTorchDataLoader(reader.read(data_file), collate_fn=mock_collate_fn, num_workers=2)
)
assert len(instances) == len(new_instances)
@pytest.mark.parametrize("lazy", (True, False))
def test_max_instances(self, lazy):
data_file = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "imdb_corpus.jsonl"
)
reader = TextClassificationJsonReader(max_instances=2, lazy=lazy)
instances = reader.read(data_file)
instance_count = sum(1 for _ in instances)
assert instance_count == 2
@pytest.mark.parametrize("num_workers", (0, 1, 2))
def test_max_instances_with_multi_process_loader(self, num_workers):
data_file = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "imdb_corpus.jsonl"
)
reader = TextClassificationJsonReader(max_instances=2, lazy=True)
instances = list(
PyTorchDataLoader(
reader.read(data_file), collate_fn=mock_collate_fn, num_workers=num_workers
)
)
assert len(instances) == 2
@pytest.mark.parametrize("lazy", (True, False))
def test_cached_max_instances(self, lazy):
data_file = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "imdb_corpus.jsonl"
)
# If we try reading with max instances, it shouldn't write to the cache.
reader = TextClassificationJsonReader(
cache_directory=self.cache_directory, lazy=lazy, max_instances=2
)
instances = list(reader.read(data_file))
assert len(instances) == 2
cache_file = reader._get_cache_location_for_file_path(str(data_file))
assert not os.path.exists(cache_file)
# Now reading again with no max_instances specified should create the cache.
reader = TextClassificationJsonReader(cache_directory=self.cache_directory, lazy=lazy)
instances = list(reader.read(data_file))
assert len(instances) > 2
assert os.path.exists(cache_file)
# The second read should only return two instances, even though it's from the cache.
reader = TextClassificationJsonReader(
cache_directory=self.cache_directory, max_instances=2, lazy=lazy
)
instances = list(reader.read(data_file))
assert len(instances) == 2
class MockWorkerInfo(NamedTuple):
id: int
num_workers: int
class MockDatasetReader(DatasetReader):
def _read(self, file_path):
for i in range(10):
yield self.text_to_instance(i)
def text_to_instance(self, index: int): # type: ignore
return Instance({"index": LabelField(index, skip_indexing=True)})
@pytest.mark.parametrize(
"node_rank, world_size, worker_id, num_workers, max_instances, expected_result",
[
(None, None, None, None, None, list(range(10))),
(None, None, None, None, 5, list(range(5))),
(None, None, None, None, 12, list(range(10))),
(None, None, 0, 1, None, list(range(10))),
(None, None, 0, 2, None, [0, 2, 4, 6, 8]),
(None, None, 1, 2, None, [1, 3, 5, 7, 9]),
(None, None, 0, 2, 5, [0, 2, 4]),
(None, None, 1, 2, 5, [1, 3]),
(0, 1, None, None, None, list(range(10))),
(0, 2, None, None, None, [0, 2, 4, 6, 8]),
(1, 2, None, None, None, [1, 3, 5, 7, 9]),
(0, 2, None, None, 5, [0, 2, 4]),
(1, 2, None, None, 5, [1, 3]),
(0, 2, 0, 2, None, [0, 4, 8]),
(0, 2, 1, 2, None, [1, 5, 9]),
(1, 2, 0, 2, None, [2, 6]),
(1, 2, 1, 2, None, [3, 7]),
(0, 2, 0, 2, 5, [0, 4]),
],
)
def test_instance_slicing(
monkeypatch,
node_rank: Optional[int],
world_size: Optional[int],
worker_id: Optional[int],
num_workers: Optional[int],
max_instances: Optional[int],
expected_result: List[int],
):
if node_rank is not None and world_size is not None:
monkeypatch.setattr(common_util, "is_distributed", lambda: True)
monkeypatch.setattr(dist, "get_rank", lambda: node_rank)
monkeypatch.setattr(dist, "get_world_size", lambda: world_size)
if worker_id is not None and num_workers is not None:
monkeypatch.setattr(
dataset_reader, "get_worker_info", lambda: MockWorkerInfo(worker_id, num_workers)
)
reader = MockDatasetReader(max_instances=max_instances)
result = list(x["index"].label for x in reader.read("the-path-doesnt-matter")) # type: ignore
assert result == expected_result
class BadLazyReader(DatasetReader):
def _read(self, file_path):
return [self.text_to_instance(i) for i in range(10)]
def text_to_instance(self, index: int): # type: ignore
return Instance({"index": LabelField(index, skip_indexing=True)})
def test_config_error_when_lazy_reader_returns_list():
reader = BadLazyReader(lazy=True)
with pytest.raises(ConfigurationError, match="must return a generator"):
deque(reader.read("path"), maxlen=0)
class BadReaderReadsNothing(DatasetReader):
def _read(self, file_path):
return []
def text_to_instance(self, index: int): # type: ignore
return Instance({"index": LabelField(index, skip_indexing=True)})
def test_config_error_when_reader_returns_no_instances():
reader = BadReaderReadsNothing()
with pytest.raises(ConfigurationError, match="No instances were read"):
deque(reader.read("path"), maxlen=0)
class BadReaderForgetsToSetLazy(DatasetReader):
def __init__(self):
pass
def _read(self, file_path):
for i in range(10):
yield self.text_to_instance(i)
def text_to_instance(self, index: int): # type: ignore
return Instance({"index": LabelField(index, skip_indexing=True)})
def test_warning_when_reader_has_no_lazy_set():
with pytest.warns(UserWarning, match="DatasetReader.lazy is not set"):
reader = BadReaderForgetsToSetLazy()
reader.read("path")
| allennlp-master | tests/data/dataset_readers/dataset_reader_test.py |
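Most of the tests above revolve around the optional instance cache: constructing a reader with `cache_directory` makes the first `read()` of a file also write a cache keyed on that file's path, and later reads (lazy or not) stream instances back from the cache unless one of the skip conditions applies (the cache-file lock cannot be acquired, distributed training is active, the read happens in a DataLoader worker process, or `max_instances` is set). A usage sketch with placeholder paths:

```python
# Sketch of the caching behavior exercised above; the paths are placeholders.
from allennlp.data.dataset_readers import TextClassificationJsonReader

reader = TextClassificationJsonReader(cache_directory="/tmp/allennlp_instance_cache")

# The first read parses the JSON-lines file and writes the cache file.
instances = list(reader.read("data/train.jsonl"))

# Subsequent reads of the same path deserialize from the cache instead of
# re-running _read(), as long as no skip condition applies.
instances_again = list(reader.read("data/train.jsonl"))
assert len(instances) == len(instances_again)
```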
| allennlp-master | tests/data/dataset_readers/__init__.py |
import pytest
from allennlp.common import Params
from allennlp.common.util import ensure_list
from allennlp.data.dataset_readers import BabiReader
from allennlp.common.testing import AllenNlpTestCase
class TestBAbIReader:
@pytest.mark.parametrize(
"keep_sentences, lazy", [(False, False), (False, True), (True, False), (True, True)]
)
def test_read_from_file(self, keep_sentences, lazy):
reader = BabiReader(keep_sentences=keep_sentences, lazy=lazy)
instances = ensure_list(reader.read(AllenNlpTestCase.FIXTURES_ROOT / "data" / "babi.txt"))
assert len(instances) == 8
if keep_sentences:
assert [t.text for t in instances[0].fields["context"][3].tokens[3:]] == [
"of",
"wolves",
".",
]
assert [t.sequence_index for t in instances[0].fields["supports"]] == [0, 1]
else:
assert [t.text for t in instances[0].fields["context"].tokens[7:9]] == ["afraid", "of"]
def test_can_build_from_params(self):
reader = BabiReader.from_params(Params({"keep_sentences": True}))
assert reader._keep_sentences
assert reader._token_indexers["tokens"].__class__.__name__ == "SingleIdTokenIndexer"
| allennlp-master | tests/data/dataset_readers/babi_reader_test.py |
import pytest
from typing import List
from allennlp.data.dataset_readers import TextClassificationJsonReader
from allennlp.common.util import ensure_list
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.tokenizers.sentence_splitter import SpacySentenceSplitter
from allennlp.common.util import get_spacy_model
class TestTextClassificationJsonReader:
@pytest.mark.parametrize("lazy", (True, False))
def test_set_skip_indexing_true(self, lazy):
reader = TextClassificationJsonReader(lazy=lazy, skip_label_indexing=True)
ag_path = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "integer_labels.jsonl"
)
instances = reader.read(ag_path)
instances = ensure_list(instances)
instance1 = {"tokens": ["This", "text", "has", "label", "0"], "label": 0}
instance2 = {"tokens": ["This", "text", "has", "label", "1"], "label": 1}
assert len(instances) == 2
fields = instances[0].fields
assert [t.text for t in fields["tokens"].tokens] == instance1["tokens"]
assert fields["label"].label == instance1["label"]
fields = instances[1].fields
assert [t.text for t in fields["tokens"].tokens] == instance2["tokens"]
assert fields["label"].label == instance2["label"]
with pytest.raises(ValueError) as exec_info:
ag_path = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "imdb_corpus.jsonl"
)
ensure_list(reader.read(ag_path))
assert str(exec_info.value) == "Labels must be integers if skip_label_indexing is True."
@pytest.mark.parametrize("lazy", (True, False))
def test_read_from_file_ag_news_corpus(self, lazy):
reader = TextClassificationJsonReader(lazy=lazy)
ag_path = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "ag_news_corpus.jsonl"
)
instances = reader.read(ag_path)
instances = ensure_list(instances)
instance1 = {
"tokens": [
"Memphis",
"Rout",
"Still",
"Stings",
"for",
"No",
".",
"14",
"Louisville",
";",
"Coach",
"Petrino",
"Vows",
"to",
"Have",
"Team",
"Better",
"Prepared",
".",
"NASHVILLE",
",",
"Tenn.",
"Nov",
"3",
",",
"2004",
"-",
"Louisville",
"#",
"39;s",
"30-point",
"loss",
"at",
"home",
"to",
"Memphis",
"last",
"season",
"is",
"still",
"a",
"painful",
"memory",
"for",
"the",
"Cardinals",
".",
],
"label": "2",
}
instance2 = {
"tokens": [
"AP",
"-",
"Eli",
"Manning",
"has",
"replaced",
"Kurt",
"Warner",
"as",
"the",
"New",
"York",
"Giants",
"'",
"starting",
"quarterback",
".",
],
"label": "2",
}
instance3 = {
"tokens": [
"A",
"conference",
"dedicated",
"to",
"online",
"journalism",
"explores",
"the",
"effect",
"blogs",
"have",
"on",
"news",
"reporting",
".",
"Some",
"say",
"they",
"draw",
"attention",
"to",
"under",
"-",
"reported",
"stories",
".",
"Others",
"struggle",
"to",
"establish",
"the",
"credibility",
"enjoyed",
"by",
"professionals",
".",
],
"label": "4",
}
assert len(instances) == 3
fields = instances[0].fields
assert [t.text for t in fields["tokens"].tokens] == instance1["tokens"]
assert fields["label"].label == instance1["label"]
fields = instances[1].fields
assert [t.text for t in fields["tokens"].tokens] == instance2["tokens"]
assert fields["label"].label == instance2["label"]
fields = instances[2].fields
assert [t.text for t in fields["tokens"].tokens] == instance3["tokens"]
assert fields["label"].label == instance3["label"]
@pytest.mark.parametrize("lazy", (True, False))
def test_read_from_file_ag_news_corpus_and_truncates_properly(self, lazy):
reader = TextClassificationJsonReader(lazy=lazy, max_sequence_length=5)
ag_path = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "ag_news_corpus.jsonl"
)
instances = reader.read(ag_path)
instances = ensure_list(instances)
instance1 = {"tokens": ["Memphis", "Rout", "Still", "Stings", "for"], "label": "2"}
instance2 = {"tokens": ["AP", "-", "Eli", "Manning", "has"], "label": "2"}
instance3 = {"tokens": ["A", "conference", "dedicated", "to", "online"], "label": "4"}
assert len(instances) == 3
fields = instances[0].fields
assert [t.text for t in fields["tokens"].tokens] == instance1["tokens"]
assert fields["label"].label == instance1["label"]
fields = instances[1].fields
assert [t.text for t in fields["tokens"].tokens] == instance2["tokens"]
assert fields["label"].label == instance2["label"]
fields = instances[2].fields
assert [t.text for t in fields["tokens"].tokens] == instance3["tokens"]
assert fields["label"].label == instance3["label"]
@pytest.mark.parametrize("max_sequence_length", (None, 5))
@pytest.mark.parametrize("lazy", (True, False))
def test_read_from_file_ag_news_corpus_and_segments_sentences_properly(
self, lazy, max_sequence_length
):
reader = TextClassificationJsonReader(
lazy=lazy, segment_sentences=True, max_sequence_length=max_sequence_length
)
ag_path = (
AllenNlpTestCase.FIXTURES_ROOT
/ "data"
/ "text_classification_json"
/ "ag_news_corpus.jsonl"
)
instances = reader.read(ag_path)
instances = ensure_list(instances)
splitter = SpacySentenceSplitter()
spacy_tokenizer = get_spacy_model("en_core_web_sm", False, False, False)
text1 = (
"Memphis Rout Still Stings for No. 14 Louisville; Coach "
"Petrino Vows to Have Team Better Prepared. NASHVILLE, "
"Tenn. Nov 3, 2004 - Louisville #39;s 30-point loss "
"at home to Memphis last season is still a painful memory "
"for the Cardinals."
)
instance1 = {"text": text1, "label": "2"}
text2 = (
"AP - Eli Manning has replaced Kurt Warner as the New York"
" Giants' starting quarterback."
)
instance2 = {"text": text2, "label": "2"}
text3 = (
"A conference dedicated to online journalism explores the "
"effect blogs have on news reporting. Some say they draw "
"attention to under-reported stories. Others struggle to "
"establish the credibility enjoyed by professionals."
)
instance3 = {"text": text3, "label": "4"}
for instance in [instance1, instance2, instance3]:
sentences = splitter.split_sentences(instance["text"])
tokenized_sentences: List[List[str]] = []
for sentence in sentences:
tokens = [token.text for token in spacy_tokenizer(sentence)]
if max_sequence_length:
tokens = tokens[:max_sequence_length]
tokenized_sentences.append(tokens)
instance["tokens"] = tokenized_sentences
assert len(instances) == 3
fields = instances[0].fields
text = [[token.text for token in sentence.tokens] for sentence in fields["tokens"]]
assert text == instance1["tokens"]
assert fields["label"].label == instance1["label"]
fields = instances[1].fields
text = [[token.text for token in sentence.tokens] for sentence in fields["tokens"]]
assert text == instance2["tokens"]
assert fields["label"].label == instance2["label"]
fields = instances[2].fields
text = [[token.text for token in sentence.tokens] for sentence in fields["tokens"]]
assert text == instance3["tokens"]
assert fields["label"].label == instance3["label"]
| allennlp-master | tests/data/dataset_readers/text_classification_json_test.py |
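The reader options exercised above compose as follows: `skip_label_indexing=True` requires integer labels, `max_sequence_length` truncates each token sequence, and `segment_sentences=True` replaces the single `TextField` with one field per sentence. A short sketch (the input path is a placeholder; each line of the file is a JSON object with `text` and `label` keys):

```python
from allennlp.data.dataset_readers import TextClassificationJsonReader

reader = TextClassificationJsonReader(
    segment_sentences=True,   # split each document into sentences first
    max_sequence_length=5,    # then truncate every sentence to at most 5 tokens
)

for instance in reader.read("data/ag_news_corpus.jsonl"):
    # With segment_sentences=True, "tokens" is a list of per-sentence TextFields.
    sentences = [[t.text for t in sentence.tokens] for sentence in instance.fields["tokens"]]
    label = instance.fields["label"].label
```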
import glob
import os
import tarfile
from collections import Counter
from typing import Tuple
import pytest
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.dataset_readers import (
SequenceTaggingDatasetReader,
ShardedDatasetReader,
DatasetReader,
)
from allennlp.data.instance import Instance
def fingerprint(instance: Instance) -> Tuple[str, ...]:
"""
Get a hashable representation of a sequence tagging instance
that can be put in a Counter.
"""
text_tuple = tuple(t.text for t in instance.fields["tokens"].tokens) # type: ignore
labels_tuple = tuple(instance.fields["tags"].labels) # type: ignore
return text_tuple + labels_tuple
def test_exception_raised_when_base_reader_implements_sharding():
class ManuallyShardedBaseReader(DatasetReader):
def __init__(self, **kwargs):
super().__init__(manual_distributed_sharding=True, **kwargs)
def _read(self, file_path: str):
pass
def text_to_instance(self, text: str): # type: ignore
pass
with pytest.raises(ValueError, match="should not implement manual distributed sharding"):
ShardedDatasetReader(ManuallyShardedBaseReader())
class TestShardedDatasetReader(AllenNlpTestCase):
def setup_method(self) -> None:
super().setup_method()
# use SequenceTaggingDatasetReader as the base reader
self.base_reader = SequenceTaggingDatasetReader(lazy=True)
base_file_path = AllenNlpTestCase.FIXTURES_ROOT / "data" / "sequence_tagging.tsv"
# Make 100 copies of the data
raw_data = open(base_file_path).read()
for i in range(100):
file_path = self.TEST_DIR / f"identical_{i}.tsv"
with open(file_path, "w") as f:
f.write(raw_data)
self.identical_files_glob = str(self.TEST_DIR / "identical_*.tsv")
# Also create an archive with all of these files to ensure that we can
# pass the archive directory.
current_dir = os.getcwd()
os.chdir(self.TEST_DIR)
self.archive_filename = self.TEST_DIR / "all_data.tar.gz"
with tarfile.open(self.archive_filename, "w:gz") as archive:
for file_path in glob.glob("identical_*.tsv"):
archive.add(file_path)
os.chdir(current_dir)
self.reader = ShardedDatasetReader(base_reader=self.base_reader)
def read_and_check_instances(self, filepath: str):
all_instances = []
for instance in self.reader.read(filepath):
all_instances.append(instance)
# 100 files * 4 sentences / file
assert len(all_instances) == 100 * 4
counts = Counter(fingerprint(instance) for instance in all_instances)
# should have the exact same data 100 times
assert len(counts) == 4
assert counts[("cats", "are", "animals", ".", "N", "V", "N", "N")] == 100
assert counts[("dogs", "are", "animals", ".", "N", "V", "N", "N")] == 100
assert counts[("snakes", "are", "animals", ".", "N", "V", "N", "N")] == 100
assert counts[("birds", "are", "animals", ".", "N", "V", "N", "N")] == 100
def test_sharded_read_glob(self):
self.read_and_check_instances(self.identical_files_glob)
def test_sharded_read_archive(self):
self.read_and_check_instances(str(self.archive_filename))
def test_attributes_inheritance(self):
# current reader has lazy set to true
base_reader = SequenceTaggingDatasetReader(lazy=True)
reader = ShardedDatasetReader(base_reader=base_reader)
assert (
reader.lazy
), "The ShardedDatasetReader didn't inherit the 'lazy' attribute from base_reader"
def test_set_attributes_main(self):
base_reader = SequenceTaggingDatasetReader(lazy=True)
reader = ShardedDatasetReader(base_reader=base_reader, lazy=False)
assert (
not reader.lazy
), "The ShardedDatasetReader inherited the 'lazy' attribute from base_reader. It should be False"
| allennlp-master | tests/data/dataset_readers/sharded_dataset_reader_test.py |
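`ShardedDatasetReader` wraps a base reader and applies its `read()` to every shard matched by the given path, which is why the tests can hand it either a glob or a `.tar.gz` archive of shard files. A minimal sketch with placeholder paths:

```python
from allennlp.data.dataset_readers import SequenceTaggingDatasetReader, ShardedDatasetReader

base_reader = SequenceTaggingDatasetReader(lazy=True)
reader = ShardedDatasetReader(base_reader=base_reader)

# The "file path" is a glob (or an archive of shards); each matching file is
# read in turn with the wrapped base reader.
for instance in reader.read("/data/shards/identical_*.tsv"):
    pass
```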
from typing import List
import pytest
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.dataset_readers.dataset_utils import span_utils
from allennlp.data.tokenizers import Token, SpacyTokenizer
class SpanUtilsTest(AllenNlpTestCase):
def test_bio_tags_to_spans_extracts_correct_spans(self):
tag_sequence = ["O", "B-ARG1", "I-ARG1", "O", "B-ARG2", "I-ARG2", "B-ARG1", "B-ARG2"]
spans = span_utils.bio_tags_to_spans(tag_sequence)
assert set(spans) == {
("ARG1", (1, 2)),
("ARG2", (4, 5)),
("ARG1", (6, 6)),
("ARG2", (7, 7)),
}
# Check that it raises when we use U- tags for single tokens.
tag_sequence = ["O", "B-ARG1", "I-ARG1", "O", "B-ARG2", "I-ARG2", "U-ARG1", "U-ARG2"]
with pytest.raises(span_utils.InvalidTagSequence):
spans = span_utils.bio_tags_to_spans(tag_sequence)
# Check that invalid BIO sequences are also handled as spans.
tag_sequence = [
"O",
"B-ARG1",
"I-ARG1",
"O",
"I-ARG1",
"B-ARG2",
"I-ARG2",
"B-ARG1",
"I-ARG2",
"I-ARG2",
]
spans = span_utils.bio_tags_to_spans(tag_sequence)
assert set(spans) == {
("ARG1", (1, 2)),
("ARG2", (5, 6)),
("ARG1", (7, 7)),
("ARG1", (4, 4)),
("ARG2", (8, 9)),
}
def test_bio_tags_to_spans_extracts_correct_spans_without_labels(self):
tag_sequence = ["O", "B", "I", "O", "B", "I", "B", "B"]
spans = span_utils.bio_tags_to_spans(tag_sequence)
assert set(spans) == {("", (1, 2)), ("", (4, 5)), ("", (6, 6)), ("", (7, 7))}
# Check that it raises when we use U- tags for single tokens.
tag_sequence = ["O", "B", "I", "O", "B", "I", "U", "U"]
with pytest.raises(span_utils.InvalidTagSequence):
spans = span_utils.bio_tags_to_spans(tag_sequence)
# Check that invalid BIO sequences are also handled as spans.
tag_sequence = ["O", "B", "I", "O", "I", "B", "I", "B", "I", "I"]
spans = span_utils.bio_tags_to_spans(tag_sequence)
assert set(spans) == {("", (1, 2)), ("", (4, 4)), ("", (5, 6)), ("", (7, 9))}
def test_bio_tags_to_spans_ignores_specified_tags(self):
tag_sequence = [
"B-V",
"I-V",
"O",
"B-ARG1",
"I-ARG1",
"O",
"B-ARG2",
"I-ARG2",
"B-ARG1",
"B-ARG2",
]
spans = span_utils.bio_tags_to_spans(tag_sequence, ["ARG1", "V"])
assert set(spans) == {("ARG2", (6, 7)), ("ARG2", (9, 9))}
def test_iob1_tags_to_spans_extracts_correct_spans_without_labels(self):
tag_sequence = ["I", "B", "I", "O", "B", "I", "B", "B"]
spans = span_utils.iob1_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 0)), ("", (1, 2)), ("", (4, 5)), ("", (6, 6)), ("", (7, 7))}
# Check that it raises when we use U- tags for single tokens.
tag_sequence = ["O", "B", "I", "O", "B", "I", "U", "U"]
with pytest.raises(span_utils.InvalidTagSequence):
spans = span_utils.iob1_tags_to_spans(tag_sequence)
# Check that invalid IOB1 sequences are also handled as spans.
tag_sequence = ["O", "B", "I", "O", "I", "B", "I", "B", "I", "I"]
spans = span_utils.iob1_tags_to_spans(tag_sequence)
assert set(spans) == {("", (1, 2)), ("", (4, 4)), ("", (5, 6)), ("", (7, 9))}
def test_iob1_tags_to_spans_extracts_correct_spans(self):
tag_sequence = ["I-ARG2", "B-ARG1", "I-ARG1", "O", "B-ARG2", "I-ARG2", "B-ARG1", "B-ARG2"]
spans = span_utils.iob1_tags_to_spans(tag_sequence)
assert set(spans) == {
("ARG2", (0, 0)),
("ARG1", (1, 2)),
("ARG2", (4, 5)),
("ARG1", (6, 6)),
("ARG2", (7, 7)),
}
# Check that it raises when we use U- tags for single tokens.
tag_sequence = ["O", "B-ARG1", "I-ARG1", "O", "B-ARG2", "I-ARG2", "U-ARG1", "U-ARG2"]
with pytest.raises(span_utils.InvalidTagSequence):
spans = span_utils.iob1_tags_to_spans(tag_sequence)
# Check that invalid IOB1 sequences are also handled as spans.
tag_sequence = [
"O",
"B-ARG1",
"I-ARG1",
"O",
"I-ARG1",
"B-ARG2",
"I-ARG2",
"B-ARG1",
"I-ARG2",
"I-ARG2",
]
spans = span_utils.iob1_tags_to_spans(tag_sequence)
assert set(spans) == {
("ARG1", (1, 2)),
("ARG1", (4, 4)),
("ARG2", (5, 6)),
("ARG1", (7, 7)),
("ARG2", (8, 9)),
}
def test_enumerate_spans_enumerates_all_spans(self):
tokenizer = SpacyTokenizer(pos_tags=True)
sentence = tokenizer.tokenize("This is a sentence.")
spans = span_utils.enumerate_spans(sentence)
assert spans == [
(0, 0),
(0, 1),
(0, 2),
(0, 3),
(0, 4),
(1, 1),
(1, 2),
(1, 3),
(1, 4),
(2, 2),
(2, 3),
(2, 4),
(3, 3),
(3, 4),
(4, 4),
]
spans = span_utils.enumerate_spans(sentence, max_span_width=3, min_span_width=2)
assert spans == [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3), (2, 4), (3, 4)]
spans = span_utils.enumerate_spans(sentence, max_span_width=3, min_span_width=2, offset=20)
assert spans == [(20, 21), (20, 22), (21, 22), (21, 23), (22, 23), (22, 24), (23, 24)]
def no_prefixed_punctuation(tokens: List[Token]):
# Only include spans which don't start or end with punctuation.
return tokens[0].pos_ != "PUNCT" and tokens[-1].pos_ != "PUNCT"
spans = span_utils.enumerate_spans(
sentence, max_span_width=3, min_span_width=2, filter_function=no_prefixed_punctuation
)
# No longer includes (2, 4) or (3, 4) as these include punctuation
# as their last element.
assert spans == [(0, 1), (0, 2), (1, 2), (1, 3), (2, 3)]
def test_bioul_tags_to_spans(self):
tag_sequence = ["B-PER", "I-PER", "L-PER", "U-PER", "U-LOC", "O"]
spans = span_utils.bioul_tags_to_spans(tag_sequence)
assert spans == [("PER", (0, 2)), ("PER", (3, 3)), ("LOC", (4, 4))]
tag_sequence = ["B-PER", "I-PER", "O"]
with pytest.raises(span_utils.InvalidTagSequence):
spans = span_utils.bioul_tags_to_spans(tag_sequence)
def test_bioul_tags_to_spans_without_labels(self):
tag_sequence = ["B", "I", "L", "U", "U", "O"]
spans = span_utils.bioul_tags_to_spans(tag_sequence)
assert spans == [("", (0, 2)), ("", (3, 3)), ("", (4, 4))]
tag_sequence = ["B", "I", "O"]
with pytest.raises(span_utils.InvalidTagSequence):
spans = span_utils.bioul_tags_to_spans(tag_sequence)
def test_iob1_to_bioul(self):
tag_sequence = ["I-ORG", "O", "I-MISC", "O"]
bioul_sequence = span_utils.to_bioul(tag_sequence, encoding="IOB1")
assert bioul_sequence == ["U-ORG", "O", "U-MISC", "O"]
tag_sequence = ["O", "I-PER", "B-PER", "I-PER", "I-PER", "B-PER"]
bioul_sequence = span_utils.to_bioul(tag_sequence, encoding="IOB1")
assert bioul_sequence == ["O", "U-PER", "B-PER", "I-PER", "L-PER", "U-PER"]
def test_bio_to_bioul(self):
tag_sequence = ["B-ORG", "O", "B-MISC", "O", "B-MISC", "I-MISC", "I-MISC"]
bioul_sequence = span_utils.to_bioul(tag_sequence, encoding="BIO")
assert bioul_sequence == ["U-ORG", "O", "U-MISC", "O", "B-MISC", "I-MISC", "L-MISC"]
# Encoding in IOB format should throw error with incorrect encoding.
with pytest.raises(span_utils.InvalidTagSequence):
tag_sequence = ["O", "I-PER", "B-PER", "I-PER", "I-PER", "B-PER"]
bioul_sequence = span_utils.to_bioul(tag_sequence, encoding="BIO")
def test_bmes_tags_to_spans_extracts_correct_spans(self):
tag_sequence = ["B-ARG1", "M-ARG1", "E-ARG1", "B-ARG2", "E-ARG2", "S-ARG3"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("ARG1", (0, 2)), ("ARG2", (3, 4)), ("ARG3", (5, 5))}
tag_sequence = ["S-ARG1", "B-ARG2", "E-ARG2", "S-ARG3"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("ARG1", (0, 0)), ("ARG2", (1, 2)), ("ARG3", (3, 3))}
# Invalid labels.
tag_sequence = ["B-ARG1", "M-ARG2"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("ARG1", (0, 0)), ("ARG2", (1, 1))}
tag_sequence = ["B-ARG1", "E-ARG2"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("ARG1", (0, 0)), ("ARG2", (1, 1))}
tag_sequence = ["B-ARG1", "M-ARG1", "M-ARG2"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("ARG1", (0, 1)), ("ARG2", (2, 2))}
tag_sequence = ["B-ARG1", "M-ARG1", "E-ARG2"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("ARG1", (0, 1)), ("ARG2", (2, 2))}
# Invalid transitions.
tag_sequence = ["B-ARG1", "B-ARG1"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("ARG1", (0, 0)), ("ARG1", (1, 1))}
tag_sequence = ["B-ARG1", "S-ARG1"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("ARG1", (0, 0)), ("ARG1", (1, 1))}
def test_bmes_tags_to_spans_extracts_correct_spans_without_labels(self):
# Good transitions.
tag_sequence = ["B", "M", "E", "B", "E", "S"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 2)), ("", (3, 4)), ("", (5, 5))}
tag_sequence = ["S", "B", "E", "S"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 0)), ("", (1, 2)), ("", (3, 3))}
# Invalid transitions.
tag_sequence = ["B", "B", "E"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 0)), ("", (1, 2))}
tag_sequence = ["B", "S"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 0)), ("", (1, 1))}
tag_sequence = ["M", "B", "E"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 0)), ("", (1, 2))}
tag_sequence = ["B", "M", "S"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 1)), ("", (2, 2))}
tag_sequence = ["B", "E", "M", "E"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 1)), ("", (2, 3))}
tag_sequence = ["B", "E", "E"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 1)), ("", (2, 2))}
tag_sequence = ["S", "M"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 0)), ("", (1, 1))}
tag_sequence = ["S", "E"]
spans = span_utils.bmes_tags_to_spans(tag_sequence)
assert set(spans) == {("", (0, 0)), ("", (1, 1))}
| allennlp-master | tests/data/dataset_readers/dataset_utils/span_utils_test.py |
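For reference, the tag schemes exercised above differ only in how span boundaries are marked: BIO uses B-/I-/O, IOB1 only needs B- to separate adjacent spans of the same label, BIOUL adds L- (last token) and U- (unit-length span), and BMES uses B/M/E/S. A small example of the conversion utilities:

```python
from allennlp.data.dataset_readers.dataset_utils import span_utils

# BIO tags -> (label, (inclusive_start, inclusive_end)) spans.
spans = span_utils.bio_tags_to_spans(["O", "B-PER", "I-PER", "O", "B-LOC"])
assert set(spans) == {("PER", (1, 2)), ("LOC", (4, 4))}

# BIO -> BIOUL: span-final tokens become L-, single-token spans become U-.
bioul = span_utils.to_bioul(["B-PER", "I-PER", "O", "B-LOC"], encoding="BIO")
assert bioul == ["B-PER", "L-PER", "O", "U-LOC"]
```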
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.token_indexers.spacy_indexer import SpacyTokenIndexer
from allennlp.data.fields.text_field import TextField
from allennlp.common.util import get_spacy_model
from allennlp.data.vocabulary import Vocabulary
class TestSpacyTokenIndexer(AllenNlpTestCase):
def test_as_array_produces_token_array(self):
indexer = SpacyTokenIndexer()
nlp = get_spacy_model("en_core_web_sm", pos_tags=True, parse=False, ner=False)
tokens = [t for t in nlp("This is a sentence.")]
field = TextField(tokens, token_indexers={"spacy": indexer})
vocab = Vocabulary()
field.index(vocab)
# Indexer functionality
array_dict = indexer.tokens_to_indices(tokens, vocab)
assert len(array_dict["tokens"]) == 5
assert len(array_dict["tokens"][0]) == 96
# Check it also works with field
lengths = field.get_padding_lengths()
array_dict = field.as_tensor(lengths)
assert list(array_dict["spacy"]["tokens"].shape) == [5, 96]
| allennlp-master | tests/data/token_indexers/spacy_indexer_test.py |
| allennlp-master | tests/data/token_indexers/__init__.py |
import numpy as np
import pytest
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary, Instance
from allennlp.data.batch import Batch
from allennlp.data.token_indexers import ELMoTokenCharactersIndexer
from allennlp.data.fields import ListField, TextField
class TestELMoTokenCharactersIndexer(AllenNlpTestCase):
def test_bos_to_char_ids(self):
indexer = ELMoTokenCharactersIndexer()
indices = indexer.tokens_to_indices([Token("<S>")], Vocabulary())
expected_indices = [
259,
257,
260,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
]
assert indices == {"elmo_tokens": [expected_indices]}
def test_eos_to_char_ids(self):
indexer = ELMoTokenCharactersIndexer()
indices = indexer.tokens_to_indices([Token("</S>")], Vocabulary())
expected_indices = [
259,
258,
260,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
]
assert indices == {"elmo_tokens": [expected_indices]}
def test_unicode_to_char_ids(self):
indexer = ELMoTokenCharactersIndexer()
indices = indexer.tokens_to_indices([Token(chr(256) + "t")], Vocabulary())
expected_indices = [
259,
197,
129,
117,
260,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
]
assert indices == {"elmo_tokens": [expected_indices]}
def test_elmo_as_array_produces_token_sequence(self):
indexer = ELMoTokenCharactersIndexer()
tokens = [Token("Second"), Token(".")]
indices = indexer.tokens_to_indices(tokens, Vocabulary())
padded_tokens = indexer.as_padded_tensor_dict(indices, padding_lengths={"elmo_tokens": 3})
expected_padded_tokens = [
[
259,
84,
102,
100,
112,
111,
101,
260,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
],
[
259,
47,
260,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
],
[
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
],
]
assert padded_tokens["elmo_tokens"].tolist() == expected_padded_tokens
def test_elmo_indexer_with_additional_tokens(self):
indexer = ELMoTokenCharactersIndexer(tokens_to_add={"<first>": 1})
tokens = [Token("<first>")]
indices = indexer.tokens_to_indices(tokens, Vocabulary())
expected_indices = [
[
259,
2,
260,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
261,
]
]
assert indices["elmo_tokens"] == expected_indices
def test_elmo_empty_token_list(self):
# Basic test
indexer = ELMoTokenCharactersIndexer()
assert {"elmo_tokens": []} == indexer.get_empty_token_list()
# Real world test
indexer = {"elmo": indexer}
tokens_1 = TextField([Token("Apple")], indexer)
targets_1 = ListField([TextField([Token("Apple")], indexer)])
tokens_2 = TextField([Token("Screen"), Token("device")], indexer)
targets_2 = ListField(
[TextField([Token("Screen")], indexer), TextField([Token("Device")], indexer)]
)
instance_1 = Instance({"tokens": tokens_1, "targets": targets_1})
instance_2 = Instance({"tokens": tokens_2, "targets": targets_2})
a_batch = Batch([instance_1, instance_2])
a_batch.index_instances(Vocabulary())
batch_tensor = a_batch.as_tensor_dict()
elmo_target_token_indices = batch_tensor["targets"]["elmo"]["elmo_tokens"]
# The TextField that is empty should have been created using the
# `get_empty_token_list` and then padded with zeros.
empty_target = elmo_target_token_indices[0][1].numpy()
np.testing.assert_array_equal(np.zeros((1, 50)), empty_target)
non_empty_targets = [
elmo_target_token_indices[0][0],
elmo_target_token_indices[1][0],
elmo_target_token_indices[1][1],
]
for non_empty_target in non_empty_targets:
with pytest.raises(AssertionError):
np.testing.assert_array_equal(np.zeros((1, 50)), non_empty_target)
| allennlp-master | tests/data/token_indexers/elmo_indexer_test.py |
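The long literal lists above follow a simple encoding: each token becomes a fixed-width vector of 50 character ids made of a begin-of-word marker, the token's UTF-8 bytes, an end-of-word marker, and padding, all shifted by one so that 0 stays free for padding whole tokens. The sketch below re-derives the values asserted in the tests; it is an illustration of the scheme, not the library's implementation (special tokens such as `<S>`, `</S>`, and `tokens_to_add` entries are mapped to reserved ids rather than their bytes).

```python
from typing import List

# Pre-offset marker values; after the +1 shift they appear as 259, 260, 261 above.
BEGIN_WORD, END_WORD, PAD_CHAR = 258, 259, 260
MAX_WORD_LENGTH = 50


def elmo_char_ids(token: str) -> List[int]:
    ids = [BEGIN_WORD] + list(token.encode("utf-8"))[: MAX_WORD_LENGTH - 2] + [END_WORD]
    ids += [PAD_CHAR] * (MAX_WORD_LENGTH - len(ids))
    return [i + 1 for i in ids]  # +1 keeps 0 reserved for token-level padding


# Matches the "Second" row asserted in test_elmo_as_array_produces_token_sequence.
assert elmo_char_ids("Second")[:8] == [259, 84, 102, 100, 112, 111, 101, 260]
```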
from allennlp.common import cached_transformers
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary
from allennlp.data.token_indexers import PretrainedTransformerMismatchedIndexer
class TestPretrainedTransformerMismatchedIndexer(AllenNlpTestCase):
def test_bert(self):
tokenizer = cached_transformers.get_tokenizer("bert-base-cased")
indexer = PretrainedTransformerMismatchedIndexer("bert-base-cased")
text = ["AllenNLP", "is", "great"]
tokens = tokenizer.tokenize(" ".join(["[CLS]"] + text + ["[SEP]"]))
expected_ids = tokenizer.convert_tokens_to_ids(tokens)
vocab = Vocabulary()
indexed = indexer.tokens_to_indices([Token(word) for word in text], vocab)
assert indexed["token_ids"] == expected_ids
assert indexed["mask"] == [True] * len(text)
# Hardcoding a few things because we know how BERT tokenization works
assert indexed["offsets"] == [(1, 3), (4, 4), (5, 5)]
assert indexed["wordpiece_mask"] == [True] * len(expected_ids)
keys = indexed.keys()
assert indexer.get_empty_token_list() == {key: [] for key in keys}
max_length = 10
padding_lengths = {key: max_length for key in keys}
padded_tokens = indexer.as_padded_tensor_dict(indexed, padding_lengths)
for key in keys:
padding_length = max_length - len(indexed[key])
if key == "offsets":
padding = (0, 0)
elif "mask" in key:
padding = False
else:
padding = 0
expected_value = indexed[key] + ([padding] * padding_length)
assert len(padded_tokens[key]) == max_length
if key == "offsets":
expected_value = [list(t) for t in expected_value]
assert padded_tokens[key].tolist() == expected_value
def test_long_sequence_splitting(self):
tokenizer = cached_transformers.get_tokenizer("bert-base-uncased")
indexer = PretrainedTransformerMismatchedIndexer("bert-base-uncased", max_length=4)
text = ["AllenNLP", "is", "great"]
tokens = tokenizer.tokenize(" ".join(["[CLS]"] + text + ["[SEP]"]))
expected_ids = tokenizer.convert_tokens_to_ids(tokens)
assert len(expected_ids) == 7 # just to make sure it's what we're expecting
cls_id, sep_id = expected_ids[0], expected_ids[-1]
expected_ids = (
expected_ids[:3]
+ [sep_id, cls_id]
+ expected_ids[3:5]
+ [sep_id, cls_id]
+ expected_ids[5:]
)
vocab = Vocabulary()
indexed = indexer.tokens_to_indices([Token(word) for word in text], vocab)
assert indexed["token_ids"] == expected_ids
# [CLS] allen ##nl [SEP] [CLS] ##p is [SEP] [CLS] great [SEP]
assert indexed["segment_concat_mask"] == [True] * len(expected_ids)
# allennlp is great
assert indexed["mask"] == [True] * len(text)
# [CLS] allen ##nl ##p is great [SEP]
assert indexed["wordpiece_mask"] == [True] * 7
| allennlp-master | tests/data/token_indexers/pretrained_transformer_mismatched_indexer_test.py |
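The `offsets` assertion is the key behavior of the mismatched indexer: the caller supplies pre-tokenized words, each word is expanded into wordpieces, and `offsets` stores the inclusive wordpiece span each word maps back to so that a model can pool wordpiece embeddings per original word. A brief sketch mirroring the first test:

```python
from allennlp.data import Token, Vocabulary
from allennlp.data.token_indexers import PretrainedTransformerMismatchedIndexer

indexer = PretrainedTransformerMismatchedIndexer("bert-base-cased")
indexed = indexer.tokens_to_indices([Token(w) for w in ["AllenNLP", "is", "great"]], Vocabulary())

# token_ids are wordpiece ids including [CLS]/[SEP]; offsets holds one inclusive
# (start, end) wordpiece span per original word, e.g. "AllenNLP" -> (1, 3).
assert indexed["offsets"] == [(1, 3), (4, 4), (5, 5)]
assert indexed["mask"] == [True, True, True]  # one entry per original word
```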
from collections import defaultdict
import pytest
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary
from allennlp.data.token_indexers import TokenCharactersIndexer
from allennlp.data.tokenizers.character_tokenizer import CharacterTokenizer
class CharacterTokenIndexerTest(AllenNlpTestCase):
def test_count_vocab_items_respects_casing(self):
indexer = TokenCharactersIndexer("characters", min_padding_length=5)
counter = defaultdict(lambda: defaultdict(int))
indexer.count_vocab_items(Token("Hello"), counter)
indexer.count_vocab_items(Token("hello"), counter)
assert counter["characters"] == {"h": 1, "H": 1, "e": 2, "l": 4, "o": 2}
indexer = TokenCharactersIndexer(
"characters", CharacterTokenizer(lowercase_characters=True), min_padding_length=5
)
counter = defaultdict(lambda: defaultdict(int))
indexer.count_vocab_items(Token("Hello"), counter)
indexer.count_vocab_items(Token("hello"), counter)
assert counter["characters"] == {"h": 2, "e": 2, "l": 4, "o": 2}
def test_as_array_produces_token_sequence(self):
indexer = TokenCharactersIndexer("characters", min_padding_length=1)
padded_tokens = indexer.as_padded_tensor_dict(
{"token_characters": [[1, 2, 3, 4, 5], [1, 2, 3], [1]]},
padding_lengths={"token_characters": 4, "num_token_characters": 10},
)
assert padded_tokens["token_characters"].tolist() == [
[1, 2, 3, 4, 5, 0, 0, 0, 0, 0],
[1, 2, 3, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
def test_tokens_to_indices_produces_correct_characters(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("A", namespace="characters")
vocab.add_token_to_namespace("s", namespace="characters")
vocab.add_token_to_namespace("e", namespace="characters")
vocab.add_token_to_namespace("n", namespace="characters")
vocab.add_token_to_namespace("t", namespace="characters")
vocab.add_token_to_namespace("c", namespace="characters")
indexer = TokenCharactersIndexer("characters", min_padding_length=1)
indices = indexer.tokens_to_indices([Token("sentential")], vocab)
assert indices == {"token_characters": [[3, 4, 5, 6, 4, 5, 6, 1, 1, 1]]}
def test_start_and_end_tokens(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("A", namespace="characters") # 2
vocab.add_token_to_namespace("s", namespace="characters") # 3
vocab.add_token_to_namespace("e", namespace="characters") # 4
vocab.add_token_to_namespace("n", namespace="characters") # 5
vocab.add_token_to_namespace("t", namespace="characters") # 6
vocab.add_token_to_namespace("c", namespace="characters") # 7
vocab.add_token_to_namespace("<", namespace="characters") # 8
vocab.add_token_to_namespace(">", namespace="characters") # 9
vocab.add_token_to_namespace("/", namespace="characters") # 10
indexer = TokenCharactersIndexer(
"characters", start_tokens=["<s>"], end_tokens=["</s>"], min_padding_length=1
)
indices = indexer.tokens_to_indices([Token("sentential")], vocab)
assert indices == {
"token_characters": [[8, 3, 9], [3, 4, 5, 6, 4, 5, 6, 1, 1, 1], [8, 10, 3, 9]]
}
def test_min_padding_length(self):
sentence = "AllenNLP is awesome ."
tokens = [Token(token) for token in sentence.split(" ")]
vocab = Vocabulary()
vocab.add_token_to_namespace("A", namespace="characters") # 2
vocab.add_token_to_namespace("l", namespace="characters") # 3
vocab.add_token_to_namespace("e", namespace="characters") # 4
vocab.add_token_to_namespace("n", namespace="characters") # 5
vocab.add_token_to_namespace("N", namespace="characters") # 6
vocab.add_token_to_namespace("L", namespace="characters") # 7
vocab.add_token_to_namespace("P", namespace="characters") # 8
vocab.add_token_to_namespace("i", namespace="characters") # 9
vocab.add_token_to_namespace("s", namespace="characters") # 10
vocab.add_token_to_namespace("a", namespace="characters") # 11
vocab.add_token_to_namespace("w", namespace="characters") # 12
vocab.add_token_to_namespace("o", namespace="characters") # 13
vocab.add_token_to_namespace("m", namespace="characters") # 14
vocab.add_token_to_namespace(".", namespace="characters") # 15
indexer = TokenCharactersIndexer("characters", min_padding_length=10)
indices = indexer.tokens_to_indices(tokens, vocab)
padded = indexer.as_padded_tensor_dict(indices, indexer.get_padding_lengths(indices))
assert padded["token_characters"].tolist() == [
[2, 3, 3, 4, 5, 6, 7, 8, 0, 0],
[9, 10, 0, 0, 0, 0, 0, 0, 0, 0],
[11, 12, 4, 10, 13, 14, 4, 0, 0, 0],
[15, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
def test_warn_min_padding_length(self):
with pytest.warns(
UserWarning, match=r"using the default value \(0\) of `min_padding_length`"
):
TokenCharactersIndexer("characters")
| allennlp-master | tests/data/token_indexers/character_token_indexer_test.py |
from collections import defaultdict
from dataclasses import dataclass
import pytest
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers import SpacyTokenizer
@dataclass(init=False)
class TokenWithStyle(Token):
__slots__ = ["is_bold"]
is_bold: bool
def __init__(self, text: str = None, is_bold: bool = False):
super().__init__(text=text)
self.is_bold = is_bold
class TestSingleIdTokenIndexer(AllenNlpTestCase):
def test_count_vocab_items_respects_casing(self):
indexer = SingleIdTokenIndexer("words")
counter = defaultdict(lambda: defaultdict(int))
indexer.count_vocab_items(Token("Hello"), counter)
indexer.count_vocab_items(Token("hello"), counter)
assert counter["words"] == {"hello": 1, "Hello": 1}
indexer = SingleIdTokenIndexer("words", lowercase_tokens=True)
counter = defaultdict(lambda: defaultdict(int))
indexer.count_vocab_items(Token("Hello"), counter)
indexer.count_vocab_items(Token("hello"), counter)
assert counter["words"] == {"hello": 2}
def test_as_array_produces_token_sequence(self):
indexer = SingleIdTokenIndexer("words")
padded_tokens = indexer.as_padded_tensor_dict({"tokens": [1, 2, 3, 4, 5]}, {"tokens": 10})
assert padded_tokens["tokens"].tolist() == [1, 2, 3, 4, 5, 0, 0, 0, 0, 0]
def test_count_other_features(self):
indexer = SingleIdTokenIndexer("other_features", feature_name="is_bold")
counter = defaultdict(lambda: defaultdict(int))
token = TokenWithStyle("Header")
token.is_bold = "True"
indexer.count_vocab_items(token, counter)
assert counter["other_features"] == {"True": 1}
def test_count_vocab_items_with_non_default_feature_name(self):
tokenizer = SpacyTokenizer(parse=True)
tokens = tokenizer.tokenize("This is a sentence.")
tokens = [Token("<S>")] + [t for t in tokens] + [Token("</S>")]
indexer = SingleIdTokenIndexer(
namespace="dep_labels", feature_name="dep_", default_value="NONE"
)
counter = defaultdict(lambda: defaultdict(int))
for token in tokens:
indexer.count_vocab_items(token, counter)
assert counter["dep_labels"] == {
"ROOT": 1,
"nsubj": 1,
"det": 1,
"NONE": 2,
"attr": 1,
"punct": 1,
}
def test_tokens_to_indices_with_non_default_feature_name(self):
tokenizer = SpacyTokenizer(parse=True)
tokens = tokenizer.tokenize("This is a sentence.")
tokens = [t for t in tokens] + [Token("</S>")]
vocab = Vocabulary()
root_index = vocab.add_token_to_namespace("ROOT", namespace="dep_labels")
none_index = vocab.add_token_to_namespace("NONE", namespace="dep_labels")
indexer = SingleIdTokenIndexer(
namespace="dep_labels", feature_name="dep_", default_value="NONE"
)
assert indexer.tokens_to_indices([tokens[1]], vocab) == {"tokens": [root_index]}
assert indexer.tokens_to_indices([tokens[-1]], vocab) == {"tokens": [none_index]}
def test_crashes_with_empty_feature_value_and_no_default(self):
tokenizer = SpacyTokenizer(parse=True)
tokens = tokenizer.tokenize("This is a sentence.")
tokens = [t for t in tokens] + [Token("</S>")]
vocab = Vocabulary()
vocab.add_token_to_namespace("ROOT", namespace="dep_labels")
vocab.add_token_to_namespace("NONE", namespace="dep_labels")
indexer = SingleIdTokenIndexer(namespace="dep_labels", feature_name="dep_")
with pytest.raises(ValueError):
indexer.tokens_to_indices([tokens[-1]], vocab)
def test_no_namespace_means_no_counting(self):
tokenizer = SpacyTokenizer(parse=True)
tokens = tokenizer.tokenize("This is a sentence.")
tokens = [Token("<S>")] + [t for t in tokens] + [Token("</S>")]
indexer = SingleIdTokenIndexer(namespace=None, feature_name="text_id")
def fail():
assert False
counter = defaultdict(fail)
for token in tokens:
indexer.count_vocab_items(token, counter)
def test_no_namespace_means_no_indexing(self):
indexer = SingleIdTokenIndexer(namespace=None, feature_name="text_id")
assert indexer.tokens_to_indices([Token(text_id=23)], None) == {"tokens": [23]}
| allennlp-master | tests/data/token_indexers/single_id_token_indexer_test.py |
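Several of the tests above rely on `feature_name`: instead of indexing a token's text, the indexer reads an arbitrary attribute off each `Token` (here spaCy's dependency label, stored on `dep_`) and falls back to `default_value` when that attribute is empty. A brief sketch with hand-built tokens instead of spaCy output:

```python
from allennlp.data import Token, Vocabulary
from allennlp.data.token_indexers import SingleIdTokenIndexer

vocab = Vocabulary()
nsubj_index = vocab.add_token_to_namespace("nsubj", namespace="dep_labels")
none_index = vocab.add_token_to_namespace("NONE", namespace="dep_labels")

indexer = SingleIdTokenIndexer(namespace="dep_labels", feature_name="dep_", default_value="NONE")

# A token carrying a dep_ value is indexed by that value; a token without one
# falls back to the "NONE" default instead of raising.
assert indexer.tokens_to_indices([Token("cats", dep_="nsubj")], vocab) == {"tokens": [nsubj_index]}
assert indexer.tokens_to_indices([Token("</S>")], vocab) == {"tokens": [none_index]}
```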
import pytest
from allennlp.common import cached_transformers
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Vocabulary
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
class TestPretrainedTransformerIndexer(AllenNlpTestCase):
def test_as_array_produces_token_sequence_bert_uncased(self):
tokenizer = cached_transformers.get_tokenizer("bert-base-uncased")
allennlp_tokenizer = PretrainedTransformerTokenizer("bert-base-uncased")
indexer = PretrainedTransformerIndexer(model_name="bert-base-uncased")
string_specials = "[CLS] AllenNLP is great [SEP]"
string_no_specials = "AllenNLP is great"
tokens = tokenizer.tokenize(string_specials)
expected_ids = tokenizer.convert_tokens_to_ids(tokens)
# tokens tokenized with our pretrained tokenizer have indices in them
allennlp_tokens = allennlp_tokenizer.tokenize(string_no_specials)
vocab = Vocabulary()
indexed = indexer.tokens_to_indices(allennlp_tokens, vocab)
assert indexed["token_ids"] == expected_ids
def test_as_array_produces_token_sequence_bert_cased(self):
tokenizer = cached_transformers.get_tokenizer("bert-base-cased")
allennlp_tokenizer = PretrainedTransformerTokenizer("bert-base-cased")
indexer = PretrainedTransformerIndexer(model_name="bert-base-cased")
string_specials = "[CLS] AllenNLP is great [SEP]"
string_no_specials = "AllenNLP is great"
tokens = tokenizer.tokenize(string_specials)
expected_ids = tokenizer.convert_tokens_to_ids(tokens)
# tokens tokenized with our pretrained tokenizer have indices in them
allennlp_tokens = allennlp_tokenizer.tokenize(string_no_specials)
vocab = Vocabulary()
indexed = indexer.tokens_to_indices(allennlp_tokens, vocab)
assert indexed["token_ids"] == expected_ids
def test_as_array_produces_token_sequence_bert_cased_sentence_pair(self):
tokenizer = cached_transformers.get_tokenizer("bert-base-cased")
allennlp_tokenizer = PretrainedTransformerTokenizer(
"bert-base-cased", add_special_tokens=False
)
indexer = PretrainedTransformerIndexer(model_name="bert-base-cased")
default_format = "[CLS] AllenNLP is great! [SEP] Really it is! [SEP]"
tokens = tokenizer.tokenize(default_format)
expected_ids = tokenizer.convert_tokens_to_ids(tokens)
allennlp_tokens = allennlp_tokenizer.add_special_tokens(
allennlp_tokenizer.tokenize("AllenNLP is great!"),
allennlp_tokenizer.tokenize("Really it is!"),
)
vocab = Vocabulary()
indexed = indexer.tokens_to_indices(allennlp_tokens, vocab)
assert indexed["token_ids"] == expected_ids
def test_as_array_produces_token_sequence_roberta(self):
tokenizer = cached_transformers.get_tokenizer("roberta-base")
allennlp_tokenizer = PretrainedTransformerTokenizer("roberta-base")
indexer = PretrainedTransformerIndexer(model_name="roberta-base")
string_specials = "<s>AllenNLP is great</s>"
string_no_specials = "AllenNLP is great"
tokens = tokenizer.tokenize(string_specials)
expected_ids = tokenizer.convert_tokens_to_ids(tokens)
# tokens from our pretrained tokenizer wrapper already carry their vocabulary ids (text_id), so the indexer can use them directly
allennlp_tokens = allennlp_tokenizer.tokenize(string_no_specials)
vocab = Vocabulary()
indexed = indexer.tokens_to_indices(allennlp_tokens, vocab)
assert indexed["token_ids"] == expected_ids
def test_as_array_produces_token_sequence_roberta_sentence_pair(self):
tokenizer = cached_transformers.get_tokenizer("roberta-base")
allennlp_tokenizer = PretrainedTransformerTokenizer(
"roberta-base", add_special_tokens=False
)
indexer = PretrainedTransformerIndexer(model_name="roberta-base")
default_format = "<s>AllenNLP is great!</s></s>Really it is!</s>"
tokens = tokenizer.tokenize(default_format)
expected_ids = tokenizer.convert_tokens_to_ids(tokens)
allennlp_tokens = allennlp_tokenizer.add_special_tokens(
allennlp_tokenizer.tokenize("AllenNLP is great!"),
allennlp_tokenizer.tokenize("Really it is!"),
)
vocab = Vocabulary()
indexed = indexer.tokens_to_indices(allennlp_tokens, vocab)
assert indexed["token_ids"] == expected_ids, f"{allennlp_tokens}\n{tokens}"
@pytest.mark.parametrize("model_name", ["roberta-base", "bert-base-cased", "xlm-mlm-ende-1024"])
def test_transformers_vocab_sizes(self, model_name):
namespace = "tags"
tokenizer = cached_transformers.get_tokenizer(model_name)
allennlp_tokenizer = PretrainedTransformerTokenizer(model_name)
indexer = PretrainedTransformerIndexer(model_name=model_name, namespace=namespace)
allennlp_tokens = allennlp_tokenizer.tokenize("AllenNLP is great!")
vocab = Vocabulary()
# indexing here copies the entire transformers vocabulary into the given namespace
indexed = indexer.tokens_to_indices(allennlp_tokens, vocab)
del indexed
assert vocab.get_vocab_size(namespace=namespace) == tokenizer.vocab_size
def test_transformers_vocabs_added_correctly(self):
namespace, model_name = "tags", "roberta-base"
tokenizer = cached_transformers.get_tokenizer(model_name, use_fast=False)
allennlp_tokenizer = PretrainedTransformerTokenizer(model_name)
indexer = PretrainedTransformerIndexer(model_name=model_name, namespace=namespace)
allennlp_tokens = allennlp_tokenizer.tokenize("AllenNLP is great!")
vocab = Vocabulary()
# indexing here copies the entire transformers vocabulary into the given namespace
indexed = indexer.tokens_to_indices(allennlp_tokens, vocab)
del indexed
assert vocab.get_token_to_index_vocabulary(namespace=namespace) == tokenizer.encoder
def test_mask(self):
# We try these models, because
# - BERT pads tokens with 0
# - RoBERTa pads tokens with 1
# - GPT2 has no padding token, so we choose 0
for model in ["bert-base-uncased", "roberta-base", "gpt2"]:
allennlp_tokenizer = PretrainedTransformerTokenizer(model)
indexer = PretrainedTransformerIndexer(model_name=model)
string_no_specials = "AllenNLP is great"
allennlp_tokens = allennlp_tokenizer.tokenize(string_no_specials)
vocab = Vocabulary()
indexed = indexer.tokens_to_indices(allennlp_tokens, vocab)
expected_masks = [True] * len(indexed["token_ids"])
assert indexed["mask"] == expected_masks
max_length = 10
padding_lengths = {key: max_length for key in indexed.keys()}
padded_tokens = indexer.as_padded_tensor_dict(indexed, padding_lengths)
padding_length = max_length - len(indexed["mask"])
expected_masks = expected_masks + ([False] * padding_length)
assert len(padded_tokens["mask"]) == max_length
assert padded_tokens["mask"].tolist() == expected_masks
assert len(padded_tokens["token_ids"]) == max_length
pad_token_id = allennlp_tokenizer.tokenizer.pad_token_id
if pad_token_id is None:
pad_token_id = 0
padding_suffix = [pad_token_id] * padding_length
assert padded_tokens["token_ids"][-padding_length:].tolist() == padding_suffix
def test_long_sequence_splitting(self):
tokenizer = cached_transformers.get_tokenizer("bert-base-uncased")
allennlp_tokenizer = PretrainedTransformerTokenizer("bert-base-uncased")
indexer = PretrainedTransformerIndexer(model_name="bert-base-uncased", max_length=4)
string_specials = "[CLS] AllenNLP is great [SEP]"
string_no_specials = "AllenNLP is great"
tokens = tokenizer.tokenize(string_specials)
expected_ids = tokenizer.convert_tokens_to_ids(tokens)
assert len(expected_ids) == 7 # just to make sure it's what we're expecting
cls_id, sep_id = expected_ids[0], expected_ids[-1]
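# With max_length=4 the indexer folds the 7 ids into segments of at most 4 ids, re-wrapping each
# segment with its own special tokens, so a [SEP], [CLS] pair is spliced in at every fold point.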
expected_ids = (
expected_ids[:3]
+ [sep_id, cls_id]
+ expected_ids[3:5]
+ [sep_id, cls_id]
+ expected_ids[5:]
)
allennlp_tokens = allennlp_tokenizer.tokenize(string_no_specials)
vocab = Vocabulary()
indexed = indexer.tokens_to_indices(allennlp_tokens, vocab)
assert indexed["token_ids"] == expected_ids
assert indexed["segment_concat_mask"] == [True] * len(expected_ids)
assert indexed["mask"] == [True] * 7 # original length
@staticmethod
def _assert_tokens_equal(expected_tokens, actual_tokens):
for expected, actual in zip(expected_tokens, actual_tokens):
assert expected.text == actual.text
assert expected.text_id == actual.text_id
assert expected.type_id == actual.type_id
def test_indices_to_tokens(self):
allennlp_tokenizer = PretrainedTransformerTokenizer("bert-base-uncased")
indexer_max_length = PretrainedTransformerIndexer(
model_name="bert-base-uncased", max_length=4
)
indexer_no_max_length = PretrainedTransformerIndexer(model_name="bert-base-uncased")
string_no_specials = "AllenNLP is great"
allennlp_tokens = allennlp_tokenizer.tokenize(string_no_specials)
vocab = Vocabulary()
indexed = indexer_no_max_length.tokens_to_indices(allennlp_tokens, vocab)
tokens_from_indices = indexer_no_max_length.indices_to_tokens(indexed, vocab)
self._assert_tokens_equal(allennlp_tokens, tokens_from_indices)
indexed = indexer_max_length.tokens_to_indices(allennlp_tokens, vocab)
tokens_from_indices = indexer_max_length.indices_to_tokens(indexed, vocab)
# For now we are not removing the special tokens introduced by the max_length folding
sep_cls = [allennlp_tokens[-1], allennlp_tokens[0]]
expected = (
allennlp_tokens[:3] + sep_cls + allennlp_tokens[3:5] + sep_cls + allennlp_tokens[5:]
)
self._assert_tokens_equal(expected, tokens_from_indices)
| allennlp-master | tests/data/token_indexers/pretrained_transformer_indexer_test.py |
from allennlp.common import Params
from allennlp.data import Instance, Token
from allennlp.data.batch import Batch
from allennlp.data.fields import TextField
from allennlp.data.samplers import MaxTokensBatchSampler
from allennlp.data.dataset_readers.dataset_reader import AllennlpDataset
from allennlp.data.dataloader import PyTorchDataLoader
from .sampler_test import SamplerTest
class TestMaxTokensSampler(SamplerTest):
def test_create_batches_groups_correctly(self):
dataset = AllennlpDataset(self.instances, vocab=self.vocab)
sampler = MaxTokensBatchSampler(
dataset, max_tokens=8, padding_noise=0, sorting_keys=["text"]
)
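# The instances (4, 4, 3, 9 and 1 tokens long) are sorted by length and packed so that the padded
# batch size (batch size * longest instance in the batch) stays within max_tokens=8, giving the
# expected groups below.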
grouped_instances = []
for indices in sampler:
grouped_instances.append([self.instances[idx] for idx in indices])
expected_groups = [
[self.instances[4], self.instances[2]],
[self.instances[0], self.instances[1]],
[self.instances[3]],
]
for group in grouped_instances:
assert group in expected_groups
expected_groups.remove(group)
assert expected_groups == []
def test_guess_sorting_key_picks_the_longest_key(self):
dataset = AllennlpDataset(self.instances, vocab=self.vocab)
sampler = MaxTokensBatchSampler(dataset, max_tokens=8, padding_noise=0)
instances = []
short_tokens = [Token(t) for t in ["what", "is", "this", "?"]]
long_tokens = [Token(t) for t in ["this", "is", "a", "not", "very", "long", "passage"]]
instances.append(
Instance(
{
"question": TextField(short_tokens, self.token_indexers),
"passage": TextField(long_tokens, self.token_indexers),
}
)
)
instances.append(
Instance(
{
"question": TextField(short_tokens, self.token_indexers),
"passage": TextField(long_tokens, self.token_indexers),
}
)
)
instances.append(
Instance(
{
"question": TextField(short_tokens, self.token_indexers),
"passage": TextField(long_tokens, self.token_indexers),
}
)
)
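# The passage field is longer than the question field in every instance, so it should be chosen as the sorting key.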
assert sampler.sorting_keys is None
sampler._guess_sorting_keys(instances)
assert sampler.sorting_keys == ["passage"]
def test_from_params(self):
dataset = AllennlpDataset(self.instances, self.vocab)
params = Params({})
sorting_keys = ["s1", "s2"]
params["sorting_keys"] = sorting_keys
params["max_tokens"] = 32
sampler = MaxTokensBatchSampler.from_params(params=params, data_source=dataset)
assert sampler.sorting_keys == sorting_keys
assert sampler.padding_noise == 0.1
assert sampler.max_tokens == 32
params = Params({"sorting_keys": sorting_keys, "padding_noise": 0.5, "max_tokens": 100})
sampler = MaxTokensBatchSampler.from_params(params=params, data_source=dataset)
assert sampler.sorting_keys == sorting_keys
assert sampler.padding_noise == 0.5
assert sampler.max_tokens == 100
def test_batch_count(self):
dataset = AllennlpDataset(self.instances, vocab=self.vocab)
sampler = MaxTokensBatchSampler(
dataset, max_tokens=8, padding_noise=0, sorting_keys=["text"]
)
# We use a custom collate_fn for testing, which doesn't actually create tensors,
# just the allennlp Batches.
dataloader = PyTorchDataLoader(
dataset, batch_sampler=sampler, collate_fn=lambda x: Batch(x)
)
assert len(dataloader) == 3
| allennlp-master | tests/data/samplers/max_tokens_batch_sampler_test.py |
| allennlp-master | tests/data/samplers/__init__.py |
from typing import List, Iterable, Dict, Union
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Vocabulary, Instance, Token, Batch
from allennlp.data.fields import TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer
class LazyIterable:
def __init__(self, instances):
self._instances = instances
def __iter__(self):
return (instance for instance in self._instances)
class SamplerTest(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.token_indexers = {"tokens": SingleIdTokenIndexer()}
self.vocab = Vocabulary()
self.this_index = self.vocab.add_token_to_namespace("this")
self.is_index = self.vocab.add_token_to_namespace("is")
self.a_index = self.vocab.add_token_to_namespace("a")
self.sentence_index = self.vocab.add_token_to_namespace("sentence")
self.another_index = self.vocab.add_token_to_namespace("another")
self.yet_index = self.vocab.add_token_to_namespace("yet")
self.very_index = self.vocab.add_token_to_namespace("very")
self.long_index = self.vocab.add_token_to_namespace("long")
instances = [
self.create_instance(["this", "is", "a", "sentence"]),
self.create_instance(["this", "is", "another", "sentence"]),
self.create_instance(["yet", "another", "sentence"]),
self.create_instance(
["this", "is", "a", "very", "very", "very", "very", "long", "sentence"]
),
self.create_instance(["sentence"]),
]
self.instances = instances
self.lazy_instances = LazyIterable(instances)
def create_instance(self, str_tokens: List[str]):
tokens = [Token(t) for t in str_tokens]
instance = Instance({"text": TextField(tokens, self.token_indexers)})
return instance
def create_instances_from_token_counts(self, token_counts: List[int]) -> List[Instance]:
return [self.create_instance(["word"] * count) for count in token_counts]
def get_batches_stats(self, batches: Iterable[Batch]) -> Dict[str, Union[int, List[int]]]:
grouped_instances = [batch.instances for batch in batches]
group_lengths = [len(group) for group in grouped_instances]
sample_sizes = []
for batch in batches:
batch_sequence_length = max(
instance.get_padding_lengths()["text"]["tokens___tokens"]
for instance in batch.instances
)
sample_sizes.append(batch_sequence_length * len(batch.instances))
return {
"batch_lengths": group_lengths,
"total_instances": sum(group_lengths),
"sample_sizes": sample_sizes,
}
def assert_instances_are_correct(self, candidate_instances):
# First we need to remove padding tokens from the candidates.
candidate_instances = [
tuple(w for w in instance if w != 0) for instance in candidate_instances
]
expected_instances = [
tuple(instance.fields["text"]._indexed_tokens["tokens"]["tokens"])
for instance in self.instances
]
assert set(candidate_instances) == set(expected_instances)
| allennlp-master | tests/data/samplers/sampler_test.py |
from allennlp.common import Params
from allennlp.data import Instance, Token
from allennlp.data.batch import Batch
from allennlp.data.fields import TextField
from allennlp.data.samplers import BucketBatchSampler
from allennlp.data.dataset_readers.dataset_reader import AllennlpDataset
from allennlp.data.dataloader import PyTorchDataLoader
from .sampler_test import SamplerTest
class TestBucketSampler(SamplerTest):
def test_create_batches_groups_correctly(self):
dataset = AllennlpDataset(self.instances, vocab=self.vocab)
sampler = BucketBatchSampler(dataset, batch_size=2, padding_noise=0, sorting_keys=["text"])
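# With padding_noise=0 the instances (4, 4, 3, 9 and 1 tokens long) are sorted by length and
# chunked into batches of two, leaving the longest instance in a batch by itself.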
grouped_instances = []
for indices in sampler:
grouped_instances.append([self.instances[idx] for idx in indices])
expected_groups = [
[self.instances[4], self.instances[2]],
[self.instances[0], self.instances[1]],
[self.instances[3]],
]
for group in grouped_instances:
assert group in expected_groups
expected_groups.remove(group)
assert expected_groups == []
def test_guess_sorting_key_picks_the_longest_key(self):
dataset = AllennlpDataset(self.instances, vocab=self.vocab)
sampler = BucketBatchSampler(dataset, batch_size=2, padding_noise=0)
instances = []
short_tokens = [Token(t) for t in ["what", "is", "this", "?"]]
long_tokens = [Token(t) for t in ["this", "is", "a", "not", "very", "long", "passage"]]
instances.append(
Instance(
{
"question": TextField(short_tokens, self.token_indexers),
"passage": TextField(long_tokens, self.token_indexers),
}
)
)
instances.append(
Instance(
{
"question": TextField(short_tokens, self.token_indexers),
"passage": TextField(long_tokens, self.token_indexers),
}
)
)
instances.append(
Instance(
{
"question": TextField(short_tokens, self.token_indexers),
"passage": TextField(long_tokens, self.token_indexers),
}
)
)
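# The passage field is longer than the question field in every instance, so it should be chosen as the sorting key.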
assert sampler.sorting_keys is None
sampler._guess_sorting_keys(instances)
assert sampler.sorting_keys == ["passage"]
def test_from_params(self):
dataset = AllennlpDataset(self.instances, self.vocab)
params = Params({})
sorting_keys = ["s1", "s2"]
params["sorting_keys"] = sorting_keys
params["batch_size"] = 32
sampler = BucketBatchSampler.from_params(params=params, data_source=dataset)
assert sampler.sorting_keys == sorting_keys
assert sampler.padding_noise == 0.1
assert sampler.batch_size == 32
params = Params(
{
"sorting_keys": sorting_keys,
"padding_noise": 0.5,
"batch_size": 100,
"drop_last": True,
}
)
sampler = BucketBatchSampler.from_params(params=params, data_source=dataset)
assert sampler.sorting_keys == sorting_keys
assert sampler.padding_noise == 0.5
assert sampler.batch_size == 100
assert sampler.drop_last
def test_drop_last_works(self):
dataset = AllennlpDataset(self.instances, vocab=self.vocab)
sampler = BucketBatchSampler(
dataset,
batch_size=2,
padding_noise=0,
sorting_keys=["text"],
drop_last=True,
)
# We use a custom collate_fn for testing, which doesn't actually create tensors,
# just the allennlp Batches.
dataloader = PyTorchDataLoader(
dataset, batch_sampler=sampler, collate_fn=lambda x: Batch(x)
)
batches = [batch for batch in iter(dataloader)]
stats = self.get_batches_stats(batches)
# all batches have length batch_size
assert all(batch_len == 2 for batch_len in stats["batch_lengths"])
# we should have lost one instance by skipping the last batch
assert stats["total_instances"] == len(self.instances) - 1
def test_batch_count(self):
dataset = AllennlpDataset(self.instances, vocab=self.vocab)
sampler = BucketBatchSampler(dataset, batch_size=2, padding_noise=0, sorting_keys=["text"])
# We use a custom collate_fn for testing, which doesn't actually create tensors,
# just the allennlp Batches.
dataloader = PyTorchDataLoader(
dataset, batch_sampler=sampler, collate_fn=lambda x: Batch(x)
)
assert len(dataloader) == 3
def test_batch_count_with_drop_last(self):
dataset = AllennlpDataset(self.instances, vocab=self.vocab)
sampler = BucketBatchSampler(
dataset,
batch_size=2,
padding_noise=0,
sorting_keys=["text"],
drop_last=True,
)
# We use a custom collate_fn for testing, which doesn't actually create tensors,
# just the allennlp Batches.
dataloader = PyTorchDataLoader(
dataset, batch_sampler=sampler, collate_fn=lambda x: Batch(x)
)
assert len(dataloader) == 2
| allennlp-master | tests/data/samplers/bucket_batch_sampler_test.py |
import numpy
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token
from allennlp.data.fields import TextField, IndexField
from allennlp.data.token_indexers import SingleIdTokenIndexer
class TestIndexField(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.text = TextField(
[Token(t) for t in ["here", "is", "a", "sentence", "."]],
{"words": SingleIdTokenIndexer("words")},
)
def test_as_tensor_converts_field_correctly(self):
index_field = IndexField(4, self.text)
tensor = index_field.as_tensor(index_field.get_padding_lengths()).detach().cpu().numpy()
numpy.testing.assert_array_equal(tensor, numpy.array([4]))
def test_index_field_raises_on_incorrect_label_type(self):
with pytest.raises(ConfigurationError):
_ = IndexField("hello", self.text)
def test_index_field_empty_field_works(self):
index_field = IndexField(4, self.text)
empty_index = index_field.empty_field()
assert empty_index.sequence_index == -1
def test_printing_doesnt_crash(self):
print(self.text)
def test_equality(self):
index_field1 = IndexField(4, self.text)
index_field2 = IndexField(4, self.text)
index_field3 = IndexField(
4,
TextField(
[Token(t) for t in ["AllenNLP", "is", "the", "bomb", "!"]],
{"words": SingleIdTokenIndexer("words")},
),
)
assert index_field1 == 4
assert index_field1 == index_field1
assert index_field1 == index_field2
assert index_field1 != index_field3
assert index_field2 != index_field3
assert index_field3 == index_field3
| allennlp-master | tests/data/fields/index_field_test.py |
import numpy
import torch
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.data.fields import ArrayField, ListField
class TestArrayField(AllenNlpTestCase):
def test_get_padding_lengths_correctly_returns_ordered_shape(self):
shape = [3, 4, 5, 6]
array = numpy.zeros(shape)
array_field = ArrayField(array)
lengths = array_field.get_padding_lengths()
for i in range(len(lengths)):
assert lengths["dimension_{}".format(i)] == shape[i]
def test_as_tensor_handles_larger_padding_dimensions(self):
shape = [3, 4]
array = numpy.ones(shape)
array_field = ArrayField(array)
padded_tensor = (
array_field.as_tensor({"dimension_0": 5, "dimension_1": 6}).detach().cpu().numpy()
)
numpy.testing.assert_array_equal(padded_tensor[:3, :4], array)
numpy.testing.assert_array_equal(padded_tensor[3:, 4:], 0.0)
def test_padding_handles_list_fields(self):
array1 = ArrayField(numpy.ones([2, 3]))
array2 = ArrayField(numpy.ones([1, 5]))
empty_array = array1.empty_field()
list_field = ListField([array1, array2, empty_array])
returned_tensor = (
list_field.as_tensor(list_field.get_padding_lengths()).detach().cpu().numpy()
)
correct_tensor = numpy.array(
[
[[1.0, 1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 1.0, 0.0, 0.0]],
[[1.0, 1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0, 0.0]],
[[0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0]],
]
)
numpy.testing.assert_array_equal(returned_tensor, correct_tensor)
def test_padding_handles_list_fields_with_padding_values(self):
array1 = ArrayField(numpy.ones([2, 3]), padding_value=-1)
array2 = ArrayField(numpy.ones([1, 5]), padding_value=-1)
empty_array = array1.empty_field()
list_field = ListField([array1, array2, empty_array])
returned_tensor = (
list_field.as_tensor(list_field.get_padding_lengths()).detach().cpu().numpy()
)
correct_tensor = numpy.array(
[
[[1.0, 1.0, 1.0, -1.0, -1.0], [1.0, 1.0, 1.0, -1.0, -1.0]],
[[1.0, 1.0, 1.0, 1.0, 1.0], [-1.0, -1.0, -1.0, -1.0, -1.0]],
[[-1.0, -1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0]],
]
)
numpy.testing.assert_array_equal(returned_tensor, correct_tensor)
def test_printing_doesnt_crash(self):
array = ArrayField(numpy.ones([2, 3]), padding_value=-1)
print(array)
def test_as_tensor_works_with_scalar(self):
array = ArrayField(numpy.asarray(42))
returned_tensor = array.as_tensor(array.get_padding_lengths())
current_tensor = numpy.asarray(42)
numpy.testing.assert_array_equal(returned_tensor, current_tensor)
def test_as_tensor_with_scalar_keeps_dtype(self):
array = ArrayField(numpy.asarray(42, dtype=numpy.float32))
returned_tensor = array.as_tensor(array.get_padding_lengths())
assert returned_tensor.dtype == torch.float32
def test_alternative_dtypes(self):
shape = [3, 4, 5, 6]
array = numpy.zeros(shape)
# Setting dtype to numpy.int64 should produce a torch.LongTensor when field is converted to
# a tensor
array_field1 = ArrayField(array, dtype=numpy.int64)
returned_tensor1 = array_field1.as_tensor(array_field1.get_padding_lengths())
assert returned_tensor1.dtype == torch.int64
# Setting dtype to numpy.uint8 should produce a torch.ByteTensor when field is converted to
# a tensor
array_field2 = ArrayField(array, dtype=numpy.uint8)
returned_tensor2 = array_field2.as_tensor(array_field2.get_padding_lengths())
assert returned_tensor2.dtype == torch.uint8
# Padding should not affect dtype
padding_lengths = {"dimension_" + str(i): 10 for i, _ in enumerate(shape)}
padded_tensor = array_field2.as_tensor(padding_lengths)
assert padded_tensor.dtype == torch.uint8
# Empty fields should have the same dtype
empty_field = array_field2.empty_field()
assert empty_field.dtype == array_field2.dtype
def test_len_works_with_scalar(self):
array = ArrayField(numpy.asarray(42))
assert len(array) == 1
def test_eq(self):
array1 = ArrayField(numpy.asarray([1, 1, 1]))
array2 = ArrayField(numpy.asarray([[1, 1, 1], [1, 1, 1]]))
array3 = ArrayField(numpy.asarray([1, 1, 2]))
array4 = ArrayField(numpy.asarray([1, 1, 1]))
assert array1 != array2
assert array1 != array3
assert array1 == array4
| allennlp-master | tests/data/fields/array_field_test.py |
from typing import Dict
import numpy
import torch
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary, Instance
from allennlp.data.fields import TextField, LabelField, ListField, IndexField, SequenceLabelField
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenCharactersIndexer
from allennlp.data.dataloader import PyTorchDataLoader
from allennlp.data.dataset_readers.dataset_reader import AllennlpDataset
from allennlp.data.tokenizers import SpacyTokenizer
from allennlp.models import Model
from allennlp.modules import Embedding
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
class DummyModel(Model):
"""
Performs a common operation (embedding) that won't work on an empty tensor.
Returns an arbitrary loss.
"""
def __init__(self, vocab: Vocabulary) -> None:
super().__init__(vocab)
weight = torch.ones(vocab.get_vocab_size(), 10)
token_embedding = Embedding(
num_embeddings=vocab.get_vocab_size(), embedding_dim=10, weight=weight, trainable=False
)
self.embedder = BasicTextFieldEmbedder({"words": token_embedding})
def forward( # type: ignore
self, list_tensor: Dict[str, torch.LongTensor]
) -> Dict[str, torch.Tensor]:
self.embedder(list_tensor)
return {"loss": 1.0}
class TestListField(AllenNlpTestCase):
def setup_method(self):
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("this", "words")
self.vocab.add_token_to_namespace("is", "words")
self.vocab.add_token_to_namespace("a", "words")
self.vocab.add_token_to_namespace("sentence", "words")
self.vocab.add_token_to_namespace("s", "characters")
self.vocab.add_token_to_namespace("e", "characters")
self.vocab.add_token_to_namespace("n", "characters")
self.vocab.add_token_to_namespace("t", "characters")
self.vocab.add_token_to_namespace("c", "characters")
for label in ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k"]:
self.vocab.add_token_to_namespace(label, "labels")
self.word_indexer = {"words": SingleIdTokenIndexer("words")}
self.words_and_characters_indexers = {
"words": SingleIdTokenIndexer("words"),
"characters": TokenCharactersIndexer("characters", min_padding_length=1),
}
self.field1 = TextField(
[Token(t) for t in ["this", "is", "a", "sentence"]], self.word_indexer
)
self.field2 = TextField(
[Token(t) for t in ["this", "is", "a", "different", "sentence"]], self.word_indexer
)
self.field3 = TextField(
[Token(t) for t in ["this", "is", "another", "sentence"]], self.word_indexer
)
self.empty_text_field = self.field1.empty_field()
self.index_field = IndexField(1, self.field1)
self.empty_index_field = self.index_field.empty_field()
self.sequence_label_field = SequenceLabelField([1, 1, 0, 1], self.field1)
self.empty_sequence_label_field = self.sequence_label_field.empty_field()
tokenizer = SpacyTokenizer()
tokens = tokenizer.tokenize("Foo")
text_field = TextField(tokens, self.word_indexer)
empty_list_field = ListField([text_field.empty_field()])
empty_fields = {"list_tensor": empty_list_field}
self.empty_instance = Instance(empty_fields)
non_empty_list_field = ListField([text_field])
non_empty_fields = {"list_tensor": non_empty_list_field}
self.non_empty_instance = Instance(non_empty_fields)
super().setup_method()
def test_get_padding_lengths(self):
list_field = ListField([self.field1, self.field2, self.field3])
list_field.index(self.vocab)
lengths = list_field.get_padding_lengths()
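# 'list_words___tokens' is the inner TextField's 'words___tokens' padding key with a 'list_' prefix;
# the longest of the three fields has 5 tokens.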
assert lengths == {"num_fields": 3, "list_words___tokens": 5}
def test_list_field_can_handle_empty_text_fields(self):
list_field = ListField([self.field1, self.field2, self.empty_text_field])
list_field.index(self.vocab)
tensor_dict = list_field.as_tensor(list_field.get_padding_lengths())
numpy.testing.assert_array_equal(
tensor_dict["words"]["tokens"].detach().cpu().numpy(),
numpy.array([[2, 3, 4, 5, 0], [2, 3, 4, 1, 5], [0, 0, 0, 0, 0]]),
)
def test_list_field_can_handle_empty_index_fields(self):
list_field = ListField([self.index_field, self.index_field, self.empty_index_field])
list_field.index(self.vocab)
tensor = list_field.as_tensor(list_field.get_padding_lengths())
numpy.testing.assert_array_equal(
tensor.detach().cpu().numpy(), numpy.array([[1], [1], [-1]])
)
def test_list_field_can_handle_empty_sequence_label_fields(self):
list_field = ListField(
[self.sequence_label_field, self.sequence_label_field, self.empty_sequence_label_field]
)
list_field.index(self.vocab)
tensor = list_field.as_tensor(list_field.get_padding_lengths())
numpy.testing.assert_array_equal(
tensor.detach().cpu().numpy(), numpy.array([[1, 1, 0, 1], [1, 1, 0, 1], [0, 0, 0, 0]])
)
def test_all_fields_padded_to_max_length(self):
list_field = ListField([self.field1, self.field2, self.field3])
list_field.index(self.vocab)
tensor_dict = list_field.as_tensor(list_field.get_padding_lengths())
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][0].detach().cpu().numpy(), numpy.array([2, 3, 4, 5, 0])
)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][1].detach().cpu().numpy(), numpy.array([2, 3, 4, 1, 5])
)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][2].detach().cpu().numpy(), numpy.array([2, 3, 1, 5, 0])
)
def test_nested_list_fields_are_padded_correctly(self):
nested_field1 = ListField([LabelField(c) for c in ["a", "b", "c", "d", "e"]])
nested_field2 = ListField([LabelField(c) for c in ["f", "g", "h", "i", "j", "k"]])
list_field = ListField([nested_field1.empty_field(), nested_field1, nested_field2])
list_field.index(self.vocab)
padding_lengths = list_field.get_padding_lengths()
assert padding_lengths == {"num_fields": 3, "list_num_fields": 6}
tensor = list_field.as_tensor(padding_lengths).detach().cpu().numpy()
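# -1 is the padding value for these LabelFields: the empty nested field is all -1, and the shorter
# nested field is padded with -1 up to the longest length of 6.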
numpy.testing.assert_almost_equal(
tensor, [[-1, -1, -1, -1, -1, -1], [0, 1, 2, 3, 4, -1], [5, 6, 7, 8, 9, 10]]
)
def test_fields_can_pad_to_greater_than_max_length(self):
list_field = ListField([self.field1, self.field2, self.field3])
list_field.index(self.vocab)
padding_lengths = list_field.get_padding_lengths()
padding_lengths["list_words___tokens"] = 7
padding_lengths["num_fields"] = 5
tensor_dict = list_field.as_tensor(padding_lengths)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][0].detach().cpu().numpy(),
numpy.array([2, 3, 4, 5, 0, 0, 0]),
)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][1].detach().cpu().numpy(),
numpy.array([2, 3, 4, 1, 5, 0, 0]),
)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][2].detach().cpu().numpy(),
numpy.array([2, 3, 1, 5, 0, 0, 0]),
)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][3].detach().cpu().numpy(),
numpy.array([0, 0, 0, 0, 0, 0, 0]),
)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"][4].detach().cpu().numpy(),
numpy.array([0, 0, 0, 0, 0, 0, 0]),
)
def test_as_tensor_can_handle_multiple_token_indexers(self):
self.field1._token_indexers = self.words_and_characters_indexers
self.field2._token_indexers = self.words_and_characters_indexers
self.field3._token_indexers = self.words_and_characters_indexers
list_field = ListField([self.field1, self.field2, self.field3])
list_field.index(self.vocab)
padding_lengths = list_field.get_padding_lengths()
tensor_dict = list_field.as_tensor(padding_lengths)
words = tensor_dict["words"]["tokens"].detach().cpu().numpy()
characters = tensor_dict["characters"]["token_characters"].detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(
words, numpy.array([[2, 3, 4, 5, 0], [2, 3, 4, 1, 5], [2, 3, 1, 5, 0]])
)
numpy.testing.assert_array_almost_equal(
characters[0],
numpy.array(
[
[5, 1, 1, 2, 0, 0, 0, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[2, 3, 4, 5, 3, 4, 6, 3, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
]
),
)
numpy.testing.assert_array_almost_equal(
characters[1],
numpy.array(
[
[5, 1, 1, 2, 0, 0, 0, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 3, 1, 3, 4, 5],
[2, 3, 4, 5, 3, 4, 6, 3, 0],
]
),
)
numpy.testing.assert_array_almost_equal(
characters[2],
numpy.array(
[
[5, 1, 1, 2, 0, 0, 0, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 0],
[1, 4, 1, 5, 1, 3, 1, 0, 0],
[2, 3, 4, 5, 3, 4, 6, 3, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
]
),
)
def test_as_tensor_can_handle_multiple_token_indexers_and_empty_fields(self):
self.field1._token_indexers = self.words_and_characters_indexers
self.field2._token_indexers = self.words_and_characters_indexers
self.field3._token_indexers = self.words_and_characters_indexers
list_field = ListField([self.field1.empty_field(), self.field1, self.field2])
list_field.index(self.vocab)
padding_lengths = list_field.get_padding_lengths()
tensor_dict = list_field.as_tensor(padding_lengths)
words = tensor_dict["words"]["tokens"].detach().cpu().numpy()
characters = tensor_dict["characters"]["token_characters"].detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(
words, numpy.array([[0, 0, 0, 0, 0], [2, 3, 4, 5, 0], [2, 3, 4, 1, 5]])
)
numpy.testing.assert_array_almost_equal(characters[0], numpy.zeros([5, 9]))
numpy.testing.assert_array_almost_equal(
characters[1],
numpy.array(
[
[5, 1, 1, 2, 0, 0, 0, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[2, 3, 4, 5, 3, 4, 6, 3, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0],
]
),
)
numpy.testing.assert_array_almost_equal(
characters[2],
numpy.array(
[
[5, 1, 1, 2, 0, 0, 0, 0, 0],
[1, 2, 0, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0],
[1, 1, 1, 1, 3, 1, 3, 4, 5],
[2, 3, 4, 5, 3, 4, 6, 3, 0],
]
),
)
def test_printing_doesnt_crash(self):
list_field = ListField([self.field1, self.field2])
print(list_field)
def test_sequence_methods(self):
list_field = ListField([self.field1, self.field2, self.field3])
assert len(list_field) == 3
assert list_field[1] == self.field2
assert [f for f in list_field] == [self.field1, self.field2, self.field3]
def test_empty_list_can_be_tensorized(self):
tokenizer = SpacyTokenizer()
tokens = tokenizer.tokenize("Foo")
text_field = TextField(tokens, self.word_indexer)
list_field = ListField([text_field.empty_field()])
fields = {
"list": list_field,
"bar": TextField(tokenizer.tokenize("BAR"), self.word_indexer),
}
instance = Instance(fields)
instance.index_fields(self.vocab)
instance.as_tensor_dict()
def test_batch_with_some_empty_lists_works(self):
dataset = AllennlpDataset([self.empty_instance, self.non_empty_instance], self.vocab)
model = DummyModel(self.vocab)
model.eval()
loader = PyTorchDataLoader(dataset, batch_size=2)
batch = next(iter(loader))
model.forward(**batch)
# This use case may seem a bit peculiar. It's intended for situations where
# you have sparse inputs that are used as additional features for some
# prediction, and they are sparse enough that they can be empty for some
# cases. It would be silly to try to handle these as None in your model; it
# makes a whole lot more sense to just have a minimally-sized tensor that
# gets entirely masked and has no effect on the rest of the model.
def test_batch_of_entirely_empty_lists_works(self):
dataset = AllennlpDataset([self.empty_instance, self.empty_instance], self.vocab)
model = DummyModel(self.vocab)
model.eval()
loader = PyTorchDataLoader(dataset, batch_size=2)
batch = next(iter(loader))
model.forward(**batch)
def test_list_of_text_padding(self):
from allennlp.data.token_indexers import PretrainedTransformerIndexer
from allennlp.data.tokenizers import Token
from allennlp.data.fields import (
TextField,
ListField,
)
from allennlp.data import Vocabulary
word_indexer = {"tokens": PretrainedTransformerIndexer("albert-base-v2")}
text_field = TextField(
[
Token(t, text_id=2, type_id=1)
for t in ["▁allen", "n", "lp", "▁has", "▁no", "▁bugs", "."]
],
word_indexer,
)
list_field = ListField([text_field])
vocab = Vocabulary()
list_field.index(vocab)
padding_lengths = {
"list_tokens___mask": 10,
"list_tokens___token_ids": 10,
"list_tokens___type_ids": 10,
"num_fields": 2,
}
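# We pad to 10 tokens per entry and 2 entries in the list; the second entry is pure padding, so its
# mask should be all False and its token ids and type ids all 0.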
tensors = list_field.as_tensor(padding_lengths)["tokens"]
assert tensors["mask"].size() == (2, 10)
assert tensors["mask"][0, 0] == True # noqa: E712
assert tensors["mask"][0, 9] == False # noqa: E712
assert (tensors["mask"][1, :] == False).all() # noqa: E712
assert tensors["token_ids"].size() == (2, 10)
assert tensors["token_ids"][0, 0] == 2
assert tensors["token_ids"][0, 9] == 0
assert (tensors["token_ids"][1, :] == 0).all()
assert tensors["type_ids"].size() == (2, 10)
assert tensors["type_ids"][0, 0] == 1
assert tensors["type_ids"][0, 9] == 0
assert (tensors["type_ids"][1, :] == 0).all()
| allennlp-master | tests/data/fields/list_field_test.py |
from collections import defaultdict
from typing import Dict, List
import numpy
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary
from allennlp.data.fields import TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer, TokenCharactersIndexer, TokenIndexer
class DictReturningTokenIndexer(TokenIndexer):
"""
A stub TokenIndexer that returns multiple arrays of different lengths.
"""
def count_vocab_items(self, token: Token, counter: Dict[str, Dict[str, int]]):
pass
def tokens_to_indices(
self, tokens: List[Token], vocabulary: Vocabulary
) -> Dict[str, List[int]]:
return {
"token_ids": (
[10, 15]
+ [vocabulary.get_token_index(token.text, "words") for token in tokens]
+ [25]
),
"additional_key": [22, 29],
}
class TestTextField(AllenNlpTestCase):
def setup_method(self):
self.vocab = Vocabulary()
self.vocab.add_token_to_namespace("sentence", namespace="words")
self.vocab.add_token_to_namespace("A", namespace="words")
self.vocab.add_token_to_namespace("A", namespace="characters")
self.vocab.add_token_to_namespace("s", namespace="characters")
self.vocab.add_token_to_namespace("e", namespace="characters")
self.vocab.add_token_to_namespace("n", namespace="characters")
self.vocab.add_token_to_namespace("t", namespace="characters")
self.vocab.add_token_to_namespace("c", namespace="characters")
super().setup_method()
def test_field_counts_vocab_items_correctly(self):
field = TextField(
[Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={"words": SingleIdTokenIndexer("words")},
)
namespace_token_counts = defaultdict(lambda: defaultdict(int))
field.count_vocab_items(namespace_token_counts)
assert namespace_token_counts["words"]["This"] == 1
assert namespace_token_counts["words"]["is"] == 1
assert namespace_token_counts["words"]["a"] == 1
assert namespace_token_counts["words"]["sentence"] == 1
assert namespace_token_counts["words"]["."] == 1
assert list(namespace_token_counts.keys()) == ["words"]
field = TextField(
[Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={
"characters": TokenCharactersIndexer("characters", min_padding_length=1)
},
)
namespace_token_counts = defaultdict(lambda: defaultdict(int))
field.count_vocab_items(namespace_token_counts)
assert namespace_token_counts["characters"]["T"] == 1
assert namespace_token_counts["characters"]["h"] == 1
assert namespace_token_counts["characters"]["i"] == 2
assert namespace_token_counts["characters"]["s"] == 3
assert namespace_token_counts["characters"]["a"] == 1
assert namespace_token_counts["characters"]["e"] == 3
assert namespace_token_counts["characters"]["n"] == 2
assert namespace_token_counts["characters"]["t"] == 1
assert namespace_token_counts["characters"]["c"] == 1
assert namespace_token_counts["characters"]["."] == 1
assert list(namespace_token_counts.keys()) == ["characters"]
field = TextField(
[Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={
"words": SingleIdTokenIndexer("words"),
"characters": TokenCharactersIndexer("characters", min_padding_length=1),
},
)
namespace_token_counts = defaultdict(lambda: defaultdict(int))
field.count_vocab_items(namespace_token_counts)
assert namespace_token_counts["characters"]["T"] == 1
assert namespace_token_counts["characters"]["h"] == 1
assert namespace_token_counts["characters"]["i"] == 2
assert namespace_token_counts["characters"]["s"] == 3
assert namespace_token_counts["characters"]["a"] == 1
assert namespace_token_counts["characters"]["e"] == 3
assert namespace_token_counts["characters"]["n"] == 2
assert namespace_token_counts["characters"]["t"] == 1
assert namespace_token_counts["characters"]["c"] == 1
assert namespace_token_counts["characters"]["."] == 1
assert namespace_token_counts["words"]["This"] == 1
assert namespace_token_counts["words"]["is"] == 1
assert namespace_token_counts["words"]["a"] == 1
assert namespace_token_counts["words"]["sentence"] == 1
assert namespace_token_counts["words"]["."] == 1
assert set(namespace_token_counts.keys()) == {"words", "characters"}
def test_index_converts_field_correctly(self):
vocab = Vocabulary()
sentence_index = vocab.add_token_to_namespace("sentence", namespace="words")
capital_a_index = vocab.add_token_to_namespace("A", namespace="words")
capital_a_char_index = vocab.add_token_to_namespace("A", namespace="characters")
s_index = vocab.add_token_to_namespace("s", namespace="characters")
e_index = vocab.add_token_to_namespace("e", namespace="characters")
n_index = vocab.add_token_to_namespace("n", namespace="characters")
t_index = vocab.add_token_to_namespace("t", namespace="characters")
c_index = vocab.add_token_to_namespace("c", namespace="characters")
field = TextField(
[Token(t) for t in ["A", "sentence"]],
{"words": SingleIdTokenIndexer(namespace="words")},
)
field.index(vocab)
assert field._indexed_tokens["words"]["tokens"] == [capital_a_index, sentence_index]
field1 = TextField(
[Token(t) for t in ["A", "sentence"]],
{"characters": TokenCharactersIndexer(namespace="characters", min_padding_length=1)},
)
field1.index(vocab)
assert field1._indexed_tokens["characters"]["token_characters"] == [
[capital_a_char_index],
[s_index, e_index, n_index, t_index, e_index, n_index, c_index, e_index],
]
field2 = TextField(
[Token(t) for t in ["A", "sentence"]],
token_indexers={
"words": SingleIdTokenIndexer(namespace="words"),
"characters": TokenCharactersIndexer(namespace="characters", min_padding_length=1),
},
)
field2.index(vocab)
assert field2._indexed_tokens["words"]["tokens"] == [capital_a_index, sentence_index]
assert field2._indexed_tokens["characters"]["token_characters"] == [
[capital_a_char_index],
[s_index, e_index, n_index, t_index, e_index, n_index, c_index, e_index],
]
def test_get_padding_lengths_raises_if_no_indexed_tokens(self):
field = TextField(
[Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={"words": SingleIdTokenIndexer("words")},
)
with pytest.raises(ConfigurationError):
field.get_padding_lengths()
def test_padding_lengths_are_computed_correctly(self):
field = TextField(
[Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={"words": SingleIdTokenIndexer("words")},
)
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
assert padding_lengths == {"words___tokens": 5}
field = TextField(
[Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={
"characters": TokenCharactersIndexer("characters", min_padding_length=1)
},
)
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
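# 'sentence' is the longest token at 8 characters, which sets characters___num_token_characters.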
assert padding_lengths == {
"characters___token_characters": 5,
"characters___num_token_characters": 8,
}
field = TextField(
[Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={
"characters": TokenCharactersIndexer("characters", min_padding_length=1),
"words": SingleIdTokenIndexer("words"),
},
)
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
assert padding_lengths == {
"characters___token_characters": 5,
"characters___num_token_characters": 8,
"words___tokens": 5,
}
def test_as_tensor_handles_words(self):
field = TextField(
[Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={"words": SingleIdTokenIndexer("words")},
)
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
tensor_dict = field.as_tensor(padding_lengths)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"].detach().cpu().numpy(), numpy.array([1, 1, 1, 2, 1])
)
def test_as_tensor_handles_longer_lengths(self):
field = TextField(
[Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={"words": SingleIdTokenIndexer("words")},
)
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
padding_lengths["words___tokens"] = 10
tensor_dict = field.as_tensor(padding_lengths)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"].detach().cpu().numpy(),
numpy.array([1, 1, 1, 2, 1, 0, 0, 0, 0, 0]),
)
def test_as_tensor_handles_characters(self):
field = TextField(
[Token(t) for t in ["This", "is", "a", "sentence", "."]],
token_indexers={
"characters": TokenCharactersIndexer("characters", min_padding_length=1)
},
)
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
tensor_dict = field.as_tensor(padding_lengths)
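# Characters not in the 'characters' vocab ('T', 'h', 'i', 'a', '.') map to the OOV id 1; known
# characters keep their vocab ids (s=3, e=4, n=5, t=6, c=7), and 0 is padding.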
expected_character_array = numpy.array(
[
[1, 1, 1, 3, 0, 0, 0, 0],
[1, 3, 0, 0, 0, 0, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0],
[3, 4, 5, 6, 4, 5, 7, 4],
[1, 0, 0, 0, 0, 0, 0, 0],
]
)
numpy.testing.assert_array_almost_equal(
tensor_dict["characters"]["token_characters"].detach().cpu().numpy(),
expected_character_array,
)
def test_as_tensor_handles_characters_if_empty_field(self):
field = TextField(
[],
token_indexers={
"characters": TokenCharactersIndexer("characters", min_padding_length=1)
},
)
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
tensor_dict = field.as_tensor(padding_lengths)
expected_character_array = numpy.array([])
numpy.testing.assert_array_almost_equal(
tensor_dict["characters"]["token_characters"].detach().cpu().numpy(),
expected_character_array,
)
def test_as_tensor_handles_words_and_characters_with_longer_lengths(self):
field = TextField(
[Token(t) for t in ["a", "sentence", "."]],
token_indexers={
"words": SingleIdTokenIndexer("words"),
"characters": TokenCharactersIndexer("characters", min_padding_length=1),
},
)
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
padding_lengths["words___tokens"] = 5
padding_lengths["characters___token_characters"] = 5
padding_lengths["characters___num_token_characters"] = 10
tensor_dict = field.as_tensor(padding_lengths)
numpy.testing.assert_array_almost_equal(
tensor_dict["words"]["tokens"].detach().cpu().numpy(), numpy.array([1, 2, 1, 0, 0])
)
numpy.testing.assert_array_almost_equal(
tensor_dict["characters"]["token_characters"].detach().cpu().numpy(),
numpy.array(
[
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[3, 4, 5, 6, 4, 5, 7, 4, 0, 0],
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
),
)
def test_printing_doesnt_crash(self):
field = TextField(
[Token(t) for t in ["A", "sentence"]],
{"words": SingleIdTokenIndexer(namespace="words")},
)
print(field)
def test_token_indexer_returns_dict(self):
field = TextField(
[Token(t) for t in ["A", "sentence"]],
token_indexers={
"field_with_dict": DictReturningTokenIndexer(),
"words": SingleIdTokenIndexer("words"),
"characters": TokenCharactersIndexer("characters", min_padding_length=1),
},
)
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
assert padding_lengths == {
"field_with_dict___token_ids": 5,
"field_with_dict___additional_key": 2,
"words___tokens": 2,
"characters___token_characters": 2,
"characters___num_token_characters": 8,
}
padding_lengths["field_with_dict___token_ids"] = 7
padding_lengths["field_with_dict___additional_key"] = 3
padding_lengths["words___tokens"] = 4
padding_lengths["characters___token_characters"] = 4
tensors = field.as_tensor(padding_lengths)
assert list(tensors["field_with_dict"]["token_ids"].shape) == [7]
assert list(tensors["field_with_dict"]["additional_key"].shape) == [3]
assert list(tensors["words"]["tokens"].shape) == [4]
assert list(tensors["characters"]["token_characters"].shape) == [4, 8]
def test_token_padding_lengths_are_computed_correctly(self):
field = TextField(
[Token(t) for t in ["A", "sentence"]],
token_indexers={
"field_with_dict": DictReturningTokenIndexer(token_min_padding_length=3),
"words": SingleIdTokenIndexer("words", token_min_padding_length=3),
"characters": TokenCharactersIndexer(
"characters", min_padding_length=1, token_min_padding_length=3
),
},
)
field.index(self.vocab)
padding_lengths = field.get_padding_lengths()
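# token_min_padding_length=3 bumps any length below 3 up to 3 (additional_key and the word/character
# token counts); lengths already above the minimum, like token_ids at 5 and num_token_characters at 8,
# are unchanged.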
assert padding_lengths == {
"field_with_dict___token_ids": 5,
"field_with_dict___additional_key": 3,
"words___tokens": 3,
"characters___token_characters": 3,
"characters___num_token_characters": 8,
}
tensors = field.as_tensor(padding_lengths)
assert tensors["field_with_dict"]["additional_key"].tolist()[-1] == 0
assert tensors["words"]["tokens"].tolist()[-1] == 0
assert tensors["characters"]["token_characters"].tolist()[-1] == [0] * 8
def test_sequence_methods(self):
field = TextField([Token(t) for t in ["This", "is", "a", "sentence", "."]], {})
assert len(field) == 5
assert field[1].text == "is"
assert [token.text for token in field] == ["This", "is", "a", "sentence", "."]
| allennlp-master | tests/data/fields/text_field_test.py |
| allennlp-master | tests/data/fields/__init__.py |
import logging
import numpy
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.fields import MultiLabelField
from allennlp.data.vocabulary import Vocabulary
class TestMultiLabelField(AllenNlpTestCase):
def test_as_tensor_returns_integer_tensor(self):
f = MultiLabelField([2, 3], skip_indexing=True, label_namespace="test1", num_labels=5)
tensor = f.as_tensor(f.get_padding_lengths()).detach().cpu().tolist()
assert tensor == [0, 0, 1, 1, 0]
assert {type(item) for item in tensor} == {int}
def test_multilabel_field_can_index_with_vocab(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("rel0", namespace="rel_labels")
vocab.add_token_to_namespace("rel1", namespace="rel_labels")
vocab.add_token_to_namespace("rel2", namespace="rel_labels")
f = MultiLabelField(["rel1", "rel0"], label_namespace="rel_labels")
f.index(vocab)
tensor = f.as_tensor(f.get_padding_lengths()).detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(tensor, numpy.array([1, 1, 0]))
def test_multilabel_field_raises_with_non_integer_labels_and_no_indexing(self):
with pytest.raises(ConfigurationError):
_ = MultiLabelField(["non integer field"], skip_indexing=True)
def test_multilabel_field_raises_with_no_indexing_and_missing_num_labels(self):
with pytest.raises(ConfigurationError):
_ = MultiLabelField([0, 2], skip_indexing=True, num_labels=None)
def test_multilabel_field_raises_with_no_indexing_and_wrong_num_labels(self):
with pytest.raises(ConfigurationError):
_ = MultiLabelField([0, 2, 4], skip_indexing=True, num_labels=3)
def test_multilabel_field_raises_with_incorrect_label_type(self):
with pytest.raises(ConfigurationError):
_ = MultiLabelField([1, 2], skip_indexing=False)
def test_multilabel_field_raises_with_given_num_labels(self):
with pytest.raises(ConfigurationError):
_ = MultiLabelField([1, 2], skip_indexing=False, num_labels=4)
def test_multilabel_field_empty_field_works(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("label1", namespace="test_empty_labels")
vocab.add_token_to_namespace("label2", namespace="test_empty_labels")
f = MultiLabelField([], label_namespace="test_empty_labels")
f.index(vocab)
tensor = f.as_tensor(f.get_padding_lengths()).detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(tensor, numpy.array([0, 0]))
g = f.empty_field()
g.index(vocab)
tensor = g.as_tensor(g.get_padding_lengths()).detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(tensor, numpy.array([0, 0]))
h = MultiLabelField(
[0, 0, 1], label_namespace="test_empty_labels", num_labels=3, skip_indexing=True
)
tensor = h.empty_field().as_tensor(None).detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(tensor, numpy.array([0, 0, 0]))
def test_class_variables_for_namespace_warnings_work_correctly(self, caplog):
with caplog.at_level(logging.WARNING, logger="allennlp.data.fields.multilabel_field"):
assert "text" not in MultiLabelField._already_warned_namespaces
_ = MultiLabelField(["test"], label_namespace="text")
assert caplog.records
# We've warned once, so the namespace should now be in the class-level set of already-warned namespaces.
assert "text" in MultiLabelField._already_warned_namespaces
caplog.clear()
_ = MultiLabelField(["test2"], label_namespace="text")
assert not caplog.records
# ... but a new namespace should still log a warning.
assert "text2" not in MultiLabelField._already_warned_namespaces
caplog.clear()
_ = MultiLabelField(["test"], label_namespace="text2")
assert caplog.records
def test_printing_doesnt_crash(self):
field = MultiLabelField(["label"], label_namespace="namespace")
print(field)
| allennlp-master | tests/data/fields/multilabel_field_test.py |
import logging
import pytest
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.fields import LabelField
from allennlp.data import Vocabulary
class TestLabelField(AllenNlpTestCase):
def test_as_tensor_returns_integer_tensor(self):
label = LabelField(5, skip_indexing=True)
tensor = label.as_tensor(label.get_padding_lengths())
assert tensor.item() == 5
def test_label_field_can_index_with_vocab(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("entailment", namespace="labels")
vocab.add_token_to_namespace("contradiction", namespace="labels")
vocab.add_token_to_namespace("neutral", namespace="labels")
label = LabelField("entailment")
label.index(vocab)
tensor = label.as_tensor(label.get_padding_lengths())
assert tensor.item() == 0
def test_label_field_raises_with_non_integer_labels_and_no_indexing(self):
with pytest.raises(ConfigurationError):
_ = LabelField("non integer field", skip_indexing=True)
def test_label_field_raises_with_incorrect_label_type(self):
with pytest.raises(ConfigurationError):
_ = LabelField([], skip_indexing=False)
def test_label_field_empty_field_works(self):
label = LabelField("test")
empty_label = label.empty_field()
assert empty_label.label == -1
def test_class_variables_for_namespace_warnings_work_correctly(self, caplog):
with caplog.at_level(logging.WARNING, logger="allennlp.data.fields.label_field"):
assert "text" not in LabelField._already_warned_namespaces
_ = LabelField("test", label_namespace="text")
assert caplog.records
# We've warned once, so the namespace should now be in the class-level set of already-warned namespaces.
assert "text" in LabelField._already_warned_namespaces
caplog.clear()
_ = LabelField("test2", label_namespace="text")
assert not caplog.records
# ... but a new namespace should still log a warning.
assert "text2" not in LabelField._already_warned_namespaces
caplog.clear()
_ = LabelField("test", label_namespace="text2")
assert caplog.records
def test_printing_doesnt_crash(self):
label = LabelField("label", label_namespace="namespace")
print(label)
| allennlp-master | tests/data/fields/label_field_test.py |
import pytest
import numpy
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data.fields import AdjacencyField, TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data import Vocabulary, Token
class TestAdjacencyField(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.text = TextField(
[Token(t) for t in ["here", "is", "a", "sentence", "."]],
{"words": SingleIdTokenIndexer("words")},
)
def test_adjacency_field_can_index_with_vocab(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("a", namespace="labels")
vocab.add_token_to_namespace("b", namespace="labels")
vocab.add_token_to_namespace("c", namespace="labels")
labels = ["a", "b"]
indices = [(0, 1), (2, 1)]
adjacency_field = AdjacencyField(indices, self.text, labels)
adjacency_field.index(vocab)
tensor = adjacency_field.as_tensor(adjacency_field.get_padding_lengths())
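# Cells default to -1 (no edge); cell (0, 1) holds the id of label 'a' (0) and cell (2, 1) the id of label 'b' (1).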
numpy.testing.assert_equal(
tensor.numpy(),
numpy.array(
[
[-1, 0, -1, -1, -1],
[-1, -1, -1, -1, -1],
[-1, 1, -1, -1, -1],
[-1, -1, -1, -1, -1],
[-1, -1, -1, -1, -1],
]
),
)
def test_adjacency_field_raises_with_out_of_bounds_indices(self):
with pytest.raises(ConfigurationError):
_ = AdjacencyField([(0, 24)], self.text)
def test_adjacency_field_raises_with_mismatching_labels_for_indices(self):
with pytest.raises(ConfigurationError):
_ = AdjacencyField([(0, 1), (0, 2)], self.text, ["label1"])
def test_adjacency_field_raises_with_duplicate_indices(self):
with pytest.raises(ConfigurationError):
_ = AdjacencyField([(0, 1), (0, 1)], self.text, ["label1"])
def test_adjacency_field_empty_field_works(self):
field = AdjacencyField([(0, 1)], self.text)
empty_field = field.empty_field()
assert empty_field.indices == []
def test_printing_doesnt_crash(self):
adjacency_field = AdjacencyField([(0, 1)], self.text, ["label1"])
print(adjacency_field)
| allennlp-master | tests/data/fields/adjacency_field_test.py |
import numpy
import pytest
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token
from allennlp.data.fields import TextField, SpanField
from allennlp.data.token_indexers import SingleIdTokenIndexer
class TestSpanField(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.indexers = {"words": SingleIdTokenIndexer("words")}
self.text = TextField(
[Token(t) for t in ["here", "is", "a", "sentence", "for", "spans", "."]], self.indexers
)
def test_as_tensor_converts_span_field_correctly(self):
span_field = SpanField(2, 3, self.text)
tensor = span_field.as_tensor(span_field.get_padding_lengths()).detach().cpu().numpy()
numpy.testing.assert_array_equal(tensor, numpy.array([2, 3]))
def test_span_field_raises_on_incorrect_label_type(self):
with pytest.raises(TypeError):
_ = SpanField("hello", 3, self.text)
def test_span_field_raises_on_ill_defined_span(self):
with pytest.raises(ValueError):
_ = SpanField(4, 1, self.text)
def test_span_field_raises_if_span_end_is_greater_than_sentence_length(self):
with pytest.raises(ValueError):
_ = SpanField(1, 30, self.text)
def test_empty_span_field_works(self):
span_field = SpanField(1, 3, self.text)
empty_span = span_field.empty_field()
assert empty_span.span_start == -1
assert empty_span.span_end == -1
def test_printing_doesnt_crash(self):
span_field = SpanField(2, 3, self.text)
print(span_field)
def test_equality(self):
span_field1 = SpanField(2, 3, self.text)
span_field2 = SpanField(2, 3, self.text)
span_field3 = SpanField(
2, 3, TextField([Token(t) for t in ["not", "the", "same", "tokens"]], self.indexers)
)
assert span_field1 == (2, 3)
assert span_field1 == span_field1
assert span_field1 == span_field2
assert span_field1 != span_field3
assert span_field2 != span_field3
| allennlp-master | tests/data/fields/span_field_test.py |
from allennlp.data.fields import Field
def test_eq_with_inheritance():
class SubField(Field):
__slots__ = ["a"]
def __init__(self, a):
self.a = a
class SubSubField(SubField):
__slots__ = ["b"]
def __init__(self, a, b):
super().__init__(a)
self.b = b
class SubSubSubField(SubSubField):
__slots__ = ["c"]
def __init__(self, a, b, c):
super().__init__(a, b)
self.c = c
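    # These assertions exercise Field equality, which gathers __slots__ from the whole
    # class hierarchy (not just the leaf class), so each subclass only has to declare
    # the slots it adds.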
assert SubField(1) == SubField(1)
assert SubField(1) != SubField(2)
assert SubSubField(1, 2) == SubSubField(1, 2)
assert SubSubField(1, 2) != SubSubField(1, 1)
assert SubSubField(1, 2) != SubSubField(2, 2)
assert SubSubSubField(1, 2, 3) == SubSubSubField(1, 2, 3)
assert SubSubSubField(1, 2, 3) != SubSubSubField(0, 2, 3)
def test_eq_with_inheritance_for_non_slots_field():
class SubField(Field):
def __init__(self, a):
self.a = a
assert SubField(1) == SubField(1)
assert SubField(1) != SubField(2)
def test_eq_with_inheritance_for_mixed_field():
class SubField(Field):
__slots__ = ["a"]
def __init__(self, a):
self.a = a
class SubSubField(SubField):
def __init__(self, a, b):
super().__init__(a)
self.b = b
assert SubField(1) == SubField(1)
assert SubField(1) != SubField(2)
assert SubSubField(1, 2) == SubSubField(1, 2)
assert SubSubField(1, 2) != SubSubField(1, 1)
assert SubSubField(1, 2) != SubSubField(2, 2)
| allennlp-master | tests/data/fields/field_test.py |
from collections import defaultdict
import logging
import pytest
import numpy
from allennlp.common.checks import ConfigurationError
from allennlp.common.testing import AllenNlpTestCase
from allennlp.data import Token, Vocabulary
from allennlp.data.fields import TextField, SequenceLabelField
from allennlp.data.token_indexers import SingleIdTokenIndexer
class TestSequenceLabelField(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.text = TextField(
[Token(t) for t in ["here", "are", "some", "words", "."]],
{"words": SingleIdTokenIndexer("words")},
)
def test_tag_length_mismatch_raises(self):
with pytest.raises(ConfigurationError):
wrong_tags = ["B", "O", "O"]
_ = SequenceLabelField(wrong_tags, self.text)
def test_count_vocab_items_correctly_indexes_tags(self):
tags = ["B", "I", "O", "O", "O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace="labels")
counter = defaultdict(lambda: defaultdict(int))
sequence_label_field.count_vocab_items(counter)
assert counter["labels"]["B"] == 1
assert counter["labels"]["I"] == 1
assert counter["labels"]["O"] == 3
assert set(counter.keys()) == {"labels"}
def test_index_converts_field_correctly(self):
vocab = Vocabulary()
b_index = vocab.add_token_to_namespace("B", namespace="*labels")
i_index = vocab.add_token_to_namespace("I", namespace="*labels")
o_index = vocab.add_token_to_namespace("O", namespace="*labels")
tags = ["B", "I", "O", "O", "O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace="*labels")
sequence_label_field.index(vocab)
assert sequence_label_field._indexed_labels == [b_index, i_index, o_index, o_index, o_index]
def test_as_tensor_produces_integer_targets(self):
vocab = Vocabulary()
vocab.add_token_to_namespace("B", namespace="*labels")
vocab.add_token_to_namespace("I", namespace="*labels")
vocab.add_token_to_namespace("O", namespace="*labels")
tags = ["B", "I", "O", "O", "O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace="*labels")
sequence_label_field.index(vocab)
padding_lengths = sequence_label_field.get_padding_lengths()
tensor = sequence_label_field.as_tensor(padding_lengths).detach().cpu().numpy()
numpy.testing.assert_array_almost_equal(tensor, numpy.array([0, 1, 2, 2, 2]))
def test_sequence_label_field_raises_on_incorrect_type(self):
with pytest.raises(ConfigurationError):
_ = SequenceLabelField([[], [], [], [], []], self.text)
def test_class_variables_for_namespace_warnings_work_correctly(self, caplog):
with caplog.at_level(logging.WARNING, logger="allennlp.data.fields.sequence_label_field"):
tags = ["B", "I", "O", "O", "O"]
assert "text" not in SequenceLabelField._already_warned_namespaces
_ = SequenceLabelField(tags, self.text, label_namespace="text")
assert caplog.records
            # We've warned once, so the namespace should now be recorded in the class variable.
assert "text" in SequenceLabelField._already_warned_namespaces
caplog.clear()
_ = SequenceLabelField(tags, self.text, label_namespace="text")
assert not caplog.records
# ... but a new namespace should still log a warning.
assert "text2" not in SequenceLabelField._already_warned_namespaces
caplog.clear()
_ = SequenceLabelField(tags, self.text, label_namespace="text2")
assert caplog.records
def test_printing_doesnt_crash(self):
tags = ["B", "I", "O", "O", "O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace="labels")
print(sequence_label_field)
def test_sequence_methods(self):
tags = ["B", "I", "O", "O", "O"]
sequence_label_field = SequenceLabelField(tags, self.text, label_namespace="labels")
assert len(sequence_label_field) == 5
assert sequence_label_field[1] == "I"
assert [label for label in sequence_label_field] == tags
| allennlp-master | tests/data/fields/sequence_label_field_test.py |
import pytest
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.data.fields import MetadataField
class TestMetadataField(AllenNlpTestCase):
def test_mapping_works_with_dict(self):
field = MetadataField({"a": 1, "b": [0]})
assert "a" in field
assert field["a"] == 1
assert len(field) == 2
keys = {k for k in field}
assert keys == {"a", "b"}
values = [v for v in field.values()]
assert len(values) == 2
assert 1 in values
assert [0] in values
def test_mapping_raises_with_non_dict(self):
field = MetadataField(0)
with pytest.raises(TypeError):
_ = field[0]
with pytest.raises(TypeError):
_ = len(field)
with pytest.raises(TypeError):
_ = [x for x in field]
| allennlp-master | tests/data/fields/metadata_field_test.py |
import pytest
from allennlp.common.testing.test_case import AllenNlpTestCase
from allennlp.data.fields import FlagField
class TestFlagField(AllenNlpTestCase):
def test_get_padding_lengths_returns_nothing(self):
flag_field = FlagField(True)
assert flag_field.get_padding_lengths() == {}
def test_as_tensor_just_returns_value(self):
for value in [True, 3.234, "this is a string"]:
assert FlagField(value).as_tensor({}) == value
def test_printing_doesnt_crash(self):
flag = FlagField(True)
print(flag)
def test_batch_tensors_returns_single_value(self):
value = True
fields = [FlagField(value) for _ in range(5)]
values = [field.as_tensor({}) for field in fields]
batched_value = fields[0].batch_tensors(values)
assert batched_value == value
def test_batch_tensors_crashes_with_non_uniform_values(self):
field = FlagField(True)
with pytest.raises(ValueError):
field.batch_tensors([True, False, True])
with pytest.raises(ValueError):
field.batch_tensors([1, 2, 3, 4])
with pytest.raises(ValueError):
field.batch_tensors(["different", "string", "flags"])
| allennlp-master | tests/data/fields/flag_field_test.py |
allennlp-master | benchmarks/__init__.py |
|
import torch
from allennlp.nn import util
from allennlp.common.testing import requires_gpu
@requires_gpu
def bench_add_sentence_boundary_token_ids(benchmark):
device = torch.device("cuda")
# shape: (32, 50)
tensor = torch.tensor([[3] * 50] * 32, device=device)
# shape: (32, 50)
mask = torch.tensor([[True] * 50, [True] * 30 + [False] * 20] * 16, device=device)
begin_token = 1
end_token = 2
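    # add_sentence_boundary_token_ids prepends begin_token and appends end_token to each
    # sequence (the end token goes right after the last unmasked position) and returns the
    # augmented tensor together with its new mask.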
benchmark(util.add_sentence_boundary_token_ids, tensor, mask, begin_token, end_token)
@requires_gpu
def bench_remove_sentence_boundaries(benchmark):
device = torch.device("cuda")
# shape: (32, 50, 1)
tensor = torch.tensor([[3] * 50] * 32, device=device).unsqueeze(-1)
# shape: (32, 50)
mask = torch.tensor([[True] * 50, [True] * 30 + [False] * 20] * 16, device=device)
benchmark(util.remove_sentence_boundaries, tensor, mask)
@requires_gpu
def bench_create_tensor_then_send_to_device(benchmark):
device = torch.device("cuda:0")
def create_tensor():
return torch.rand((32, 50)).to(device)
benchmark(create_tensor)
@requires_gpu
def bench_create_tensor_directly_on_device(benchmark):
device = torch.device("cuda:0")
def create_tensor():
return torch.rand((32, 50), device=device)
benchmark(create_tensor)
| allennlp-master | benchmarks/nn/util_bench.py |
allennlp-master | benchmarks/data/__init__.py |
|
allennlp-master | benchmarks/data/tokenizers/__init__.py |
|
from allennlp.data.tokenizers import CharacterTokenizer
tokenizer = CharacterTokenizer()
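# CharacterTokenizer emits one token per character, so this benchmark measures
# per-character tokenization speed over the ~450-character passage below.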
passage = (
"Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor "
"incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis "
"nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. "
"Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu "
"fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in "
"culpa qui officia deserunt mollit anim id est laborum."
)
def bench_character_tokenizer(benchmark):
benchmark(tokenizer.tokenize, passage)
| allennlp-master | benchmarks/data/tokenizers/character_tokenizer_bench.py |
# encoding: utf-8
"""
Prepares markdown release notes for GitHub releases.
"""
import os
from typing import List
from allennlp.version import VERSION
TAG = os.environ["TAG"]
ADDED_HEADER = "### Added 🎉"
CHANGED_HEADER = "### Changed ⚠️"
FIXED_HEADER = "### Fixed ✅"
REMOVED_HEADER = "### Removed 👋"
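# The parsing below assumes CHANGELOG.md is laid out roughly like this (illustrative):
#
#   ## Unreleased
#   ## [v1.0.0](https://...) - 2020-06-16
#   ### Added
#   - Some new feature.
#
# Everything between the "## [TAG]" heading and the next "## " heading is collected,
# with the "### ..." section headings swapped for the emoji versions above.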
def get_change_log_notes() -> str:
in_current_section = False
current_section_notes: List[str] = []
with open("CHANGELOG.md") as changelog:
for line in changelog:
if line.startswith("## "):
if line.startswith("## Unreleased"):
continue
if line.startswith(f"## [{TAG}]"):
in_current_section = True
continue
break
if in_current_section:
if line.startswith("### Added"):
line = ADDED_HEADER + "\n"
elif line.startswith("### Changed"):
line = CHANGED_HEADER + "\n"
elif line.startswith("### Fixed"):
line = FIXED_HEADER + "\n"
elif line.startswith("### Removed"):
line = REMOVED_HEADER + "\n"
current_section_notes.append(line)
assert current_section_notes
return "## What's new\n\n" + "".join(current_section_notes).strip() + "\n"
def get_commit_history() -> str:
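    # `git describe --always --tags --abbrev=0 {TAG}^^` resolves to the nearest tag below
    # TAG (normally the previous release), so this lists one-line commits made since then.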
stream = os.popen(
f"git log $(git describe --always --tags --abbrev=0 {TAG}^^)..{TAG}^ --oneline"
)
return "## Commits\n\n" + stream.read()
def main():
assert TAG == f"v{VERSION}"
print(get_change_log_notes())
print(get_commit_history())
if __name__ == "__main__":
main()
| allennlp-master | scripts/release_notes.py |
#!/usr/bin/env python
"""
This script is used to populate the table of contents for the API in the mkdocs config file.
"""
import argparse
from pathlib import Path
from typing import Any, List
from ruamel.yaml import YAML
from allennlp.version import VERSION
API_TOC_KEY = "API"
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("target_yaml", help="Path to the target mkdocs config file.")
parser.add_argument("source_yaml", help="Path to the mkdocs skeleton config file.")
parser.add_argument("docs_root", help="The root of the markdown docs folder.")
parser.add_argument(
"api_docs_path", help="The root of the API docs within the markdown docs root folder."
)
parser.add_argument("--docs-version", type=str, default=f"v{VERSION}")
return parser.parse_args()
def build_api_toc(source_path: Path, docs_root: Path):
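    # Recursively mirrors the API docs directory as mkdocs nav entries, producing a
    # structure roughly like (illustrative):
    #   [{"commands": [{"train": "api/commands/train.md"}, ...]}, {"version": "api/version.md"}]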
nav_entries: List[Any] = []
for child in source_path.iterdir():
if child.is_dir():
nav_subsection = build_api_toc(child, docs_root)
elif child.suffix == ".md":
nav_subsection = str(child.relative_to(docs_root))
nav_entries.append({child.stem: nav_subsection})
nav_entries.sort(key=lambda x: list(x)[0], reverse=False)
return nav_entries
def main():
yaml = YAML()
opts = parse_args()
source_yaml = yaml.load(Path(opts.source_yaml))
nav_entries = build_api_toc(Path(opts.api_docs_path), Path(opts.docs_root))
# Add version to name.
source_yaml["site_name"] = f"AllenNLP {opts.docs_version}"
# Find the yaml sub-object corresponding to the API table of contents.
site_nav = source_yaml["nav"]
for nav_obj in site_nav:
if API_TOC_KEY in nav_obj:
break
nav_obj[API_TOC_KEY] = nav_entries
with open(opts.target_yaml, "w") as f:
yaml.dump(source_yaml, f)
print(f"{opts.target_yaml} created")
if __name__ == "__main__":
main()
| allennlp-master | scripts/build_docs_config.py |
#!/usr/bin/env python
import glob
import logging
import os
import re
import shutil
import sys
import tempfile
sys.path.insert(0, os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
from allennlp.commands.test_install import _get_module_root
from allennlp.commands.train import train_model_from_file, train_model
from allennlp.common import Params
from allennlp.common.util import pushd
logger = logging.getLogger(__name__)
def train_fixture(config_prefix: str, config_filename: str = "experiment.json") -> None:
config_file = config_prefix + config_filename
serialization_dir = config_prefix + "serialization"
    # train_model doesn't like incomplete serialization directories,
    # so remove them if they exist.
if os.path.exists(serialization_dir):
shutil.rmtree(serialization_dir)
# train the model
train_model_from_file(config_file, serialization_dir)
# remove unnecessary files
shutil.rmtree(os.path.join(serialization_dir, "log"))
for filename in glob.glob(os.path.join(serialization_dir, "*")):
if (
filename.endswith(".log")
or filename.endswith(".json")
or re.search(r"epoch_[0-9]+\.th$", filename)
):
os.remove(filename)
def train_fixture_gpu(config_prefix: str) -> None:
config_file = config_prefix + "experiment.json"
serialization_dir = config_prefix + "serialization"
params = Params.from_file(config_file)
params["trainer"]["cuda_device"] = 0
# train this one to a tempdir
tempdir = tempfile.gettempdir()
train_model(params, tempdir)
    # now copy back the weights and the archived model
shutil.copy(os.path.join(tempdir, "best.th"), os.path.join(serialization_dir, "best_gpu.th"))
shutil.copy(
os.path.join(tempdir, "model.tar.gz"), os.path.join(serialization_dir, "model_gpu.tar.gz")
)
if __name__ == "__main__":
module_root = _get_module_root().parent
with pushd(module_root, verbose=True):
models = [
("basic_classifier", "experiment_seq2seq.jsonnet"),
"simple_tagger",
"simple_tagger_with_elmo",
"simple_tagger_with_span_f1",
]
for model in models:
if isinstance(model, tuple):
model, config_filename = model
train_fixture(f"allennlp/tests/fixtures/{model}/", config_filename)
else:
train_fixture(f"allennlp/tests/fixtures/{model}/")
| allennlp-master | scripts/train_fixtures.py |
#!/usr/bin/env python
# encoding: UTF-8
"""
Goes through all the inline-links in markdown files and reports the breakages.
"""
import re
import sys
import pathlib
import os
from multiprocessing.dummy import Pool
from typing import Tuple, NamedTuple, Optional
import requests
OK_STATUS_CODES = (
200,
401, # the resource exists but may require some sort of login.
403, # ^ same
405, # HEAD method not allowed.
406, # the resource exists, but our default 'Accept-' header may not match what the server can provide.
)
THREADS = 10
http_session = requests.Session()
for resource_prefix in ("http://", "https://"):
http_session.mount(
resource_prefix,
requests.adapters.HTTPAdapter(max_retries=5, pool_connections=20, pool_maxsize=THREADS),
)
class MatchTuple(NamedTuple):
source: str
name: str
link: str
def url_ok(match_tuple: MatchTuple) -> Tuple[bool, str]:
"""Check if a URL is reachable."""
try:
result = http_session.head(match_tuple.link, timeout=5, allow_redirects=True)
return (
result.ok or result.status_code in OK_STATUS_CODES,
f"status code = {result.status_code}",
)
except (requests.ConnectionError, requests.Timeout):
return False, "connection error"
def path_ok(match_tuple: MatchTuple) -> bool:
"""Check if a file in this repository exists."""
relative_path = match_tuple.link.split("#")[0]
full_path = os.path.join(os.path.dirname(str(match_tuple.source)), relative_path)
return os.path.exists(full_path)
def link_ok(match_tuple: MatchTuple) -> Tuple[MatchTuple, bool, Optional[str]]:
reason: Optional[str] = None
if match_tuple.link.startswith("http"):
result_ok, reason = url_ok(match_tuple)
else:
result_ok = path_ok(match_tuple)
print(f" {'✓' if result_ok else '✗'} {match_tuple.link}")
return match_tuple, result_ok, reason
def main():
print("Finding all markdown files in the current directory...")
project_root = (pathlib.Path(__file__).parent / "..").resolve()
markdown_files = project_root.glob("**/*.md")
all_matches = set()
url_regex = re.compile(r"\[([^!][^\]]+)\]\(([^)(]+)\)")
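    # Captures the name and target of inline markdown links, i.e. "[name](target)";
    # the [^!] keeps it from matching bracketed text that itself starts with "!"
    # (such as an image nested inside a link).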
for markdown_file in markdown_files:
with open(markdown_file) as handle:
for line in handle.readlines():
matches = url_regex.findall(line)
for name, link in matches:
if "localhost" not in link:
all_matches.add(MatchTuple(source=str(markdown_file), name=name, link=link))
print(f" {len(all_matches)} markdown files found")
print("Checking to make sure we can retrieve each link...")
with Pool(processes=THREADS) as pool:
results = pool.map(link_ok, [match for match in list(all_matches)])
unreachable_results = [
(match_tuple, reason) for match_tuple, success, reason in results if not success
]
if unreachable_results:
print(f"Unreachable links ({len(unreachable_results)}):")
for match_tuple, reason in unreachable_results:
print(" > Source: " + match_tuple.source)
print(" Name: " + match_tuple.name)
print(" Link: " + match_tuple.link)
if reason is not None:
print(" Reason: " + reason)
sys.exit(1)
print("No Unreachable link found.")
if __name__ == "__main__":
main()
| allennlp-master | scripts/check_links.py |
from datetime import datetime as dt
import os
from github import Github
def main():
g = Github(os.environ["GITHUB_TOKEN"])
repo = g.get_repo("allenai/allennlp")
open_issues = repo.get_issues(state="open")
for issue in open_issues:
if (
issue.milestone is None
and issue.assignees
and issue.pull_request is None
and (dt.utcnow() - issue.updated_at).days >= 14
):
assignees = ", ".join([f"@{user.login}" for user in issue.assignees])
print(f"Pinging {assignees} for {issue}")
issue.create_comment(
f"{assignees} this is just a friendly ping to make sure you "
"haven't forgotten about this issue 😜"
)
if __name__ == "__main__":
main()
| allennlp-master | scripts/ping_issue_assignees.py |
#!/usr/bin/env python
import argparse
from typing import Dict
import requests
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("version_type", choices=["stable", "latest", "current"])
return parser.parse_args()
def get_current_version() -> str:
VERSION: Dict[str, str] = {}
with open("allennlp/version.py", "r") as version_file:
exec(version_file.read(), VERSION)
return "v" + VERSION["VERSION"]
def get_latest_version() -> str:
resp = requests.get("https://api.github.com/repos/allenai/allennlp/tags")
return resp.json()[0]["name"]
def get_stable_version() -> str:
resp = requests.get("https://api.github.com/repos/allenai/allennlp/releases/latest")
return resp.json()["tag_name"]
def main() -> None:
opts = parse_args()
if opts.version_type == "stable":
print(get_stable_version())
elif opts.version_type == "latest":
print(get_latest_version())
elif opts.version_type == "current":
print(get_current_version())
else:
raise NotImplementedError
if __name__ == "__main__":
main()
| allennlp-master | scripts/get_version.py |
from datetime import datetime as dt
import os
from github import Github
LABELS_TO_EXEMPT = ["contributions welcome", "merge when ready", "under development", "help wanted"]
def main():
g = Github(os.environ["GITHUB_TOKEN"])
repo = g.get_repo("allenai/allennlp")
open_issues = repo.get_issues(state="open")
for issue in open_issues:
if (
issue.milestone is None
and not issue.assignees
and issue.pull_request is None
and (dt.utcnow() - issue.updated_at).days > 7
and (dt.utcnow() - issue.created_at).days >= 14
and not any(label.name.lower() in LABELS_TO_EXEMPT for label in issue.get_labels())
):
print("Closing", issue)
issue.create_comment(
"This issue is being closed due to lack of activity. "
"If you think it still needs to be addressed, please comment on this thread 👇"
)
issue.add_to_labels("stale")
issue.edit(state="closed")
if __name__ == "__main__":
main()
| allennlp-master | scripts/close_stale_issues.py |
#!/usr/bin/env python
"""
Turn docstrings from a single module into a markdown file.
We do this with PydocMarkdown, using custom processors and renderers defined here.
"""
import argparse
from collections import OrderedDict
from dataclasses import dataclass
from enum import Enum
import logging
from multiprocessing import Pool, cpu_count
import os
from pathlib import Path
import re
import sys
from typing import Optional, Tuple, List
from nr.databind.core import Struct
from nr.interface import implements, override
from pydoc_markdown import PydocMarkdown
from pydoc_markdown.contrib.loaders.python import PythonLoader
from pydoc_markdown.contrib.renderers.markdown import MarkdownRenderer
from pydoc_markdown.interfaces import Processor, Renderer
from pydoc_markdown.reflection import Argument, Module, Function, Class, Data
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("py2md")
BASE_MODULE = os.environ.get("BASE_MODULE", "allennlp")
BASE_SOURCE_LINK = os.environ.get(
"BASE_SOURCE_LINK", "https://github.com/allenai/allennlp/blob/master/allennlp/"
)
class DocstringError(Exception):
pass
def emphasize(s: str) -> str:
# Need to escape underscores.
s = s.replace("_", "\\_")
return f"__{s}__"
class Section(Enum):
ARGUMENTS = "ARGUMENTS"
PARAMETERS = "PARAMETERS"
ATTRIBUTES = "ATTRIBUTES"
MEMBERS = "MEMBERS"
RETURNS = "RETURNS"
RAISES = "RAISES"
EXAMPLES = "EXAMPLES"
OTHER = "OTHER"
@classmethod
def from_str(cls, section: str) -> "Section":
section = section.upper()
for member in cls:
if section == member.value:
return member
return cls.OTHER
REQUIRED_PARAM_RE = re.compile(r"^`([^`]+)`(, required\.?)?$")
OPTIONAL_PARAM_RE = re.compile(
r"^`([^`]+)`,?\s+(optional,?\s)?\(\s?(optional,\s)?default\s?=\s?`([^`]+)`\s?\)\.?$"
)
OPTIONAL_PARAM_NO_DEFAULT_RE = re.compile(r"^`([^`]+)`,?\s+optional\.?$")
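# Illustrative examples of the type descriptions these patterns accept:
#   REQUIRED_PARAM_RE            -> "`int`" or "`int`, required"
#   OPTIONAL_PARAM_RE            -> "`int`, optional (default = `3`)"
#   OPTIONAL_PARAM_NO_DEFAULT_RE -> "`int`, optional"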
@dataclass
class Param:
ident: str
ty: Optional[str] = None
required: bool = False
default: Optional[str] = None
@classmethod
def from_line(cls, line: str) -> Optional["Param"]:
if ":" not in line:
return None
ident, description = line.split(":", 1)
ident = ident.strip()
description = description.strip()
if " " in ident:
return None
maybe_match = REQUIRED_PARAM_RE.match(description)
if maybe_match:
ty = maybe_match.group(1)
return cls(ident=ident, ty=ty, required=True)
maybe_match = OPTIONAL_PARAM_RE.match(description)
if maybe_match:
ty = maybe_match.group(1)
default = maybe_match.group(4)
return cls(ident=ident, ty=ty, required=False, default=default)
maybe_match = OPTIONAL_PARAM_NO_DEFAULT_RE.match(description)
if maybe_match:
ty = maybe_match.group(1)
return cls(ident=ident, ty=ty, required=False)
raise DocstringError(
f"Invalid parameter / attribute description: '{line}'\n"
"Make sure types are enclosed in backticks.\n"
"Required parameters should be documented like: '{ident} : `{type}`'\n"
"Optional parameters should be documented like: '{ident} : `{type}`, optional (default = `{expr}`)'\n"
)
def to_line(self) -> str:
line: str = f"- {emphasize(self.ident)} :"
if self.ty:
line += f" `{self.ty}`"
if not self.required:
line += ", optional"
if self.default:
line += f" (default = `{self.default}`)"
line += " <br>"
return line
# For now we handle attributes / members in the same way as parameters / arguments.
Attrib = Param
@dataclass
class RetVal:
description: Optional[str] = None
ident: Optional[str] = None
ty: Optional[str] = None
@classmethod
def from_line(cls, line: str) -> "RetVal":
if ": " not in line:
return cls(description=line)
ident, ty = line.split(":", 1)
ident = ident.strip()
ty = ty.strip()
if ty and not ty.startswith("`"):
raise DocstringError(f"Type should be enclosed in backticks: '{line}'")
return cls(ident=ident, ty=ty)
def to_line(self) -> str:
if self.description:
line = f"- {self.description} <br>"
elif self.ident:
line = f"- {emphasize(self.ident)}"
if self.ty:
line += f" : {self.ty} <br>"
else:
line += " <br>"
else:
raise DocstringError("RetVal must have either description or ident")
return line
@dataclass
class ProcessorState:
parameters: "OrderedDict[str, Param]"
current_section: Optional[Section] = None
codeblock_opened: bool = False
consecutive_blank_line_count: int = 0
@implements(Processor)
class AllenNlpDocstringProcessor(Struct):
"""
Use to turn our docstrings into Markdown.
"""
CROSS_REF_RE = re.compile("(:(class|func|mod):`~?([a-zA-Z0-9_.]+)`)")
UNDERSCORE_HEADER_RE = re.compile(r"(.*)\n-{3,}\n")
MULTI_LINE_LINK_RE = re.compile(r"(\[[^\]]+\])\n\s*(\([^\)]+\))")
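    # For example, _transform_cross_references below turns a sphinx-style reference like
    # ":class:`~allennlp.data.vocabulary.Vocabulary`" into the markdown link
    # "[`Vocabulary`](/api/data/vocabulary#vocabulary)".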
@override
def process(self, graph, resolver):
graph.visit(self.process_node)
def process_node(self, node):
if not getattr(node, "docstring", None):
return
lines: List[str] = []
state: ProcessorState = ProcessorState(parameters=OrderedDict())
docstring = node.docstring
# Standardize header syntax to use '#' instead of underscores.
docstring = self.UNDERSCORE_HEADER_RE.sub(r"# \g<1>", docstring)
# It's common to break up markdown links into multiple lines in docstrings, but
# they won't render as links in the doc HTML unless they are all on one line.
docstring = self.MULTI_LINE_LINK_RE.sub(r"\g<1>\g<2>", docstring)
for line in docstring.split("\n"):
# Check if we're starting or ending a codeblock.
if line.startswith("```"):
state.codeblock_opened = not state.codeblock_opened
if not state.codeblock_opened:
# If we're not in a codeblock, we'll do some pre-processing.
if not line.strip():
state.consecutive_blank_line_count += 1
if state.consecutive_blank_line_count >= 2:
state.current_section = None
else:
state.consecutive_blank_line_count = 0
line = self._preprocess_line(line, state)
lines.append(line)
# Now set the docstring to our preprocessed version of it.
node.docstring = "\n".join(lines)
def _preprocess_line(self, line, state: ProcessorState) -> str:
match = re.match(r"#+ (.*)$", line)
if match:
state.current_section = Section.from_str(match.group(1).strip())
line = re.sub(r"#+ (.*)$", r"<strong>\1</strong>\n", line)
else:
if line and not line.startswith(" ") and not line.startswith("!!! "):
if state.current_section in (
Section.ARGUMENTS,
Section.PARAMETERS,
):
param = Param.from_line(line)
if param:
line = param.to_line()
elif state.current_section in (Section.ATTRIBUTES, Section.MEMBERS):
attrib = Attrib.from_line(line)
if attrib:
line = attrib.to_line()
elif state.current_section in (Section.RETURNS, Section.RAISES):
retval = RetVal.from_line(line)
line = retval.to_line()
line = self._transform_cross_references(line)
return line
def _transform_cross_references(self, line: str) -> str:
"""
Replace sphinx style crossreferences with markdown links.
"""
for match, ty, name in self.CROSS_REF_RE.findall(line):
if name.startswith(f"{BASE_MODULE}."):
path = name.split(".")
if ty == "mod":
href = "/api/" + "/".join(path[1:])
else:
href = "/api/" + "/".join(path[1:-1]) + "#" + path[-1].lower()
cross_ref = f"[`{path[-1]}`]({href})"
elif "." not in name:
cross_ref = f"[`{name}`](#{name.lower()})"
else:
cross_ref = f"`{name}`"
line = line.replace(match, cross_ref)
return line
@implements(Processor)
class AllenNlpFilterProcessor(Struct):
"""
Used to filter out nodes that we don't want to document.
"""
PRIVATE_METHODS_TO_KEEP = {"DatasetReader._read", "__call__"}
def process(self, graph, _resolver):
graph.visit(self._process_node)
def _process_node(self, node):
def _check(node):
if node.name.startswith("_"):
if node.name in self.PRIVATE_METHODS_TO_KEEP:
return True
if (
node.parent
and f"{node.parent.name}.{node.name}" in self.PRIVATE_METHODS_TO_KEEP
):
return True
return False
if node.parent and node.parent.name.startswith("_"):
return False
if node.name == "logger" and isinstance(node.parent, Module):
return False
return True
if not _check(node):
node.visible = False
@implements(Renderer)
class AllenNlpRenderer(MarkdownRenderer):
def _format_function_signature(
self,
func: Function,
override_name: str = None,
add_method_bar: bool = True,
include_parent_class: bool = True,
) -> str:
parts = []
for dec in func.decorators:
parts.append("@{}{}\n".format(dec.name, dec.args or ""))
if self.signature_python_help_style and not func.is_method():
parts.append("{} = ".format(func.path()))
if func.is_async:
parts.append("async ")
if self.signature_with_def:
parts.append("def ")
if self.signature_class_prefix and (
func.is_function() and func.parent and func.parent.is_class()
):
parts.append(func.parent.name + ".")
parts.append((override_name or func.name))
signature_args = Argument.format_arglist(func.args)
if signature_args.endswith(","):
signature_args = signature_args[:-1].strip()
if (
len(parts[-1])
+ len(signature_args)
+ (0 if not func.return_ else len(str(func.return_)))
> 60
):
signature_args = ",\n ".join(
filter(lambda s: s.strip() not in ("", ","), (str(arg) for arg in func.args))
)
parts.append("(\n " + signature_args + "\n)")
else:
parts.append("(" + signature_args + ")")
if func.return_:
parts.append(" -> {}".format(func.return_))
result = "".join(parts)
if add_method_bar and func.is_method():
result = "\n".join(" | " + line for line in result.split("\n"))
if include_parent_class:
bases = ", ".join(map(str, func.parent.bases))
if func.parent.metaclass:
bases += ", metaclass=" + str(func.parent.metaclass)
if bases:
class_signature = f"class {func.parent.name}({bases})"
else:
class_signature = f"class {func.parent.name}"
result = f"{class_signature}:\n | ...\n{result}"
return result
def _format_data_signature(self, data: Data) -> str:
expr = str(data.expr)
if len(expr) > self.data_expression_maxlength:
expr = expr[: self.data_expression_maxlength] + " ..."
if data.annotation:
signature = f"{data.name}: {data.annotation} = {expr}"
else:
signature = f"{data.name} = {expr}"
if data.parent and data.parent.is_class():
bases = ", ".join(map(str, data.parent.bases))
if data.parent.metaclass:
bases += ", metaclass=" + str(data.parent.metaclass)
if bases:
class_signature = f"class {data.parent.name}({bases})"
else:
class_signature = f"class {data.parent.name}"
return f"{class_signature}:\n | ...\n | {signature}"
else:
return signature
def _format_classdef_signature(self, cls: Class) -> str:
code = ""
if cls.decorators:
for dec in cls.decorators:
code += "@{}{}\n".format(dec.name, dec.args or "")
bases = ", ".join(map(str, cls.bases))
if cls.metaclass:
bases += ", metaclass=" + str(cls.metaclass)
if bases:
code += "class {}({})".format(cls.name, bases)
else:
code += "class {}".format(cls.name)
if self.signature_python_help_style:
code = cls.path() + " = " + code
if self.classdef_render_init_signature_if_needed and (
"__init__" in cls.members and not cls.members["__init__"].visible
):
code += ":\n" + self._format_function_signature(
cls.members["__init__"],
add_method_bar=True,
include_parent_class=False,
)
return code
def _render_module_breadcrumbs(self, fp, mod: Module):
submods = mod.name.split(".")
breadcrumbs = []
for i, submod_name in enumerate(submods):
if i == 0:
title = f"<i>{submod_name}</i>"
elif i == len(submods) - 1:
title = f"<strong>.{submod_name}</strong>"
else:
title = f"<i>.{submod_name}</i>"
breadcrumbs.append(title)
"/".join(submods[1:])
source_link = BASE_SOURCE_LINK + "/".join(submods[1:]) + ".py"
fp.write(
"<div>\n"
' <p class="alignleft">' + "".join(breadcrumbs) + "</p>\n"
f' <p class="alignright"><a class="sourcelink" href="{source_link}">[SOURCE]</a></p>\n'
"</div>\n"
'<div style="clear: both;"></div>\n\n---\n\n'
)
def _render_object(self, fp, level, obj):
if not isinstance(obj, Module) or self.render_module_header:
self._render_header(fp, level, obj)
if isinstance(obj, Module):
self._render_module_breadcrumbs(fp, obj)
self._render_signature_block(fp, obj)
if obj.docstring:
lines = obj.docstring.split("\n")
if self.docstrings_as_blockquote:
lines = ["> " + x for x in lines]
fp.write("\n".join(lines))
fp.write("\n\n")
def py2md(module: str, out: Optional[str] = None) -> bool:
"""
Returns `True` if module successfully processed, otherwise `False`.
"""
logger.debug("Processing %s", module)
pydocmd = PydocMarkdown(
loaders=[PythonLoader(modules=[module])],
processors=[AllenNlpFilterProcessor(), AllenNlpDocstringProcessor()],
renderer=AllenNlpRenderer(
filename=out,
add_method_class_prefix=False,
add_member_class_prefix=False,
data_code_block=True,
signature_with_def=True,
use_fixed_header_levels=False,
render_module_header=False,
descriptive_class_title=False,
),
)
if out:
out_path = Path(out)
os.makedirs(out_path.parent, exist_ok=True)
pydocmd.load_modules()
try:
pydocmd.process()
except DocstringError as err:
logger.exception("Failed to process %s.\n%s", module, err)
return False
pydocmd.render()
return True
def _py2md_wrapper(x: Tuple[str, str]) -> bool:
"""
Used to wrap py2md since we can't pickle a lambda (needed for multiprocessing).
"""
return py2md(x[0], x[1])
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("modules", nargs="+", type=str, help="""The Python modules to parse.""")
parser.add_argument(
"-o",
"--out",
nargs="+",
type=str,
help="""Output files.
If given, must have the same number of items as 'modules'.
If not given, stdout is used.""",
)
return parser.parse_args()
def main():
opts = parse_args()
outputs = opts.out if opts.out else [None] * len(opts.modules)
if len(outputs) != len(opts.modules):
raise ValueError("Number inputs and outputs should be the same.")
n_threads = cpu_count()
errors: int = 0
if len(opts.modules) > n_threads and opts.out:
# If writing to files, can process in parallel.
chunk_size = max([1, int(len(outputs) / n_threads)])
logger.info("Using %d threads", n_threads)
with Pool(n_threads) as p:
for result in p.imap(_py2md_wrapper, zip(opts.modules, outputs), chunk_size):
if not result:
errors += 1
else:
# If writing to stdout, need to process sequentially. Otherwise the output
# could get intertwined.
for module, out in zip(opts.modules, outputs):
result = py2md(module, out)
if not result:
errors += 1
logger.info("Processed %d modules", len(opts.modules))
if errors:
logger.error("Found %d errors", errors)
sys.exit(1)
if __name__ == "__main__":
main()
| allennlp-master | scripts/py2md.py |
from typing import Optional
import pytest
from allennlp.common.testing import AllenNlpTestCase
from scripts.py2md import py2md, Param, DocstringError
class TestPy2md(AllenNlpTestCase):
def test_basic_example(self, capsys):
py2md("scripts.tests.py2md.basic_example")
captured = capsys.readouterr()
with open(
self.PROJECT_ROOT / "scripts" / "tests" / "py2md" / "basic_example_expected_output.md"
) as f:
expected = f.read()
assert captured.out.split("\n") == expected.split("\n")
@pytest.mark.parametrize(
"line_in, line_out",
[
(
"a : `int`, optional (default = `None`)",
"- __a__ : `int`, optional (default = `None`) <br>",
),
(
"foo : `Tuple[int, ...]`, optional (default = `()`)",
"- __foo__ : `Tuple[int, ...]`, optional (default = `()`) <br>",
),
("a : `int`, required", "- __a__ : `int` <br>"),
("a : `int`", "- __a__ : `int` <br>"),
("_a : `int`", "- __\\_a__ : `int` <br>"),
("a_ : `int`", "- __a\\___ : `int` <br>"),
],
)
def test_param_from_and_to_line(line_in: str, line_out: Optional[str]):
param = Param.from_line(line_in)
assert param is not None
assert param.to_line() == line_out
@pytest.mark.parametrize(
"line",
[
"a : `int`, optional (default = None)",
"a : `int`, optional (default = `None)",
"a : `int`, optional (default = None`)",
"a : int",
"a : `int",
"a : int`",
],
)
def test_param_from_bad_line_raises(line: str):
with pytest.raises(DocstringError):
Param.from_line(line)
| allennlp-master | scripts/tests/py2md/py2md_test.py |
"""
This is a docstring.
And this is a multi-line line: [http://example.com]
(https://example.com/blah/blah/blah.html).
"""
from dataclasses import dataclass
SOME_GLOBAL_VAR = "Ahhhh I'm a global var!!"
"""
This is a global var.
"""
def func_with_no_args():
"""
This function has no args.
"""
return None
def func_with_args(a: int, b: int, c: int = 3) -> int:
"""
This function has some args.
# Parameters
a : `int`
A number.
b : `int`
Another number.
c : `int`, optional (default = `3`)
Yet another number.
Notes
-----
These are some notes.
# Returns
`int`
The result of `a + b * c`.
"""
return a + b * c
class SomeClass:
"""
I'm a class!
# Parameters
x : `float`
This attribute is called `x`.
"""
some_class_level_variable = 1
"""
This is how you document a class-level variable.
"""
some_class_level_var_with_type: int = 1
def __init__(self) -> None:
self.x = 1.0
def _private_method(self) -> None:
"""
Private methods should not be included in documentation.
"""
pass
def some_method(self) -> None:
"""
I'm a method!
But I don't do anything.
# Returns
`None`
"""
return None
def method_with_alternative_return_section(self) -> int:
"""
Another method.
# Returns
A completely arbitrary number.
"""
return 3
def method_with_alternative_return_section3(self) -> int:
"""
Another method.
# Returns
number : `int`
A completely arbitrary number.
"""
return 3
class AnotherClassWithReallyLongConstructor:
def __init__(
self,
a_really_long_argument_name: int = 0,
another_long_name: float = 2,
these_variable_names_are_terrible: str = "yea I know",
**kwargs,
) -> None:
self.a = a_really_long_argument_name
self.b = another_long_name
self.c = these_variable_names_are_terrible
self.other = kwargs
@dataclass
class ClassWithDecorator:
x: int
class _PrivateClass:
def public_method_on_private_class(self):
"""
This should not be documented since the class is private.
"""
pass
| allennlp-master | scripts/tests/py2md/basic_example.py |
import pytest
import sqlite3
from unittest.mock import call, Mock
from allennlp.common.testing import AllenNlpTestCase
from scripts.ai2_internal.resume_daemon import (
BeakerStatus,
create_table,
handler,
logger,
resume,
start_autoresume,
)
# Don't spam the log in tests.
logger.removeHandler(handler)
class ResumeDaemonTest(AllenNlpTestCase):
def setup_method(self):
super().setup_method()
self.connection = sqlite3.connect(":memory:")
create_table(self.connection)
def test_create_beaker_status_works(self):
status = BeakerStatus("stopped")
assert status.name == "stopped"
def test_create_beaker_status_throws(self):
with pytest.raises(ValueError):
            # The constructor raises here, so nothing after this call would execute.
            BeakerStatus("garbage")
def test_does_nothing_on_empty_db(self):
beaker = Mock()
resume(self.connection, beaker)
assert not beaker.method_calls
def test_does_not_resume_a_running_experiment(self):
beaker = Mock()
experiment_id = "foo"
start_autoresume(self.connection, experiment_id, 5)
beaker.get_status.return_value = BeakerStatus.running
resume(self.connection, beaker)
beaker.get_status.assert_called()
assert len(beaker.method_calls) == 1
def test_does_not_resume_a_finished_experiment(self):
beaker = Mock()
experiment_id = "foo"
start_autoresume(self.connection, experiment_id, 5)
beaker.get_status.return_value = BeakerStatus.succeeded
resume(self.connection, beaker)
beaker.get_status.assert_called()
assert len(beaker.method_calls) == 1
def test_does_resume_a_preempted_experiment(self):
beaker = Mock()
experiment_id = "foo"
start_autoresume(self.connection, experiment_id, 5)
beaker.get_status.return_value = BeakerStatus.preempted
beaker.resume.return_value = "foo2"
resume(self.connection, beaker)
beaker.get_status.assert_called()
beaker.resume.assert_called()
assert len(beaker.method_calls) == 2
def test_respects_upper_bound_on_resumes(self):
beaker = Mock()
experiment_id = "foo"
start_autoresume(self.connection, experiment_id, 5)
beaker.get_status.return_value = BeakerStatus.preempted
for i in range(10):
beaker.resume.return_value = f"foo{i}"
resume(self.connection, beaker)
calls = [
call.get_status("foo"),
call.resume("foo"),
call.get_status("foo0"),
call.resume("foo0"),
call.get_status("foo1"),
call.resume("foo1"),
call.get_status("foo2"),
call.resume("foo2"),
call.get_status("foo3"),
call.resume("foo3"),
call.get_status("foo4"),
]
beaker.assert_has_calls(calls)
def test_handles_a_realistic_scenario(self):
beaker = Mock()
experiment_id = "foo"
start_autoresume(self.connection, experiment_id, 5)
beaker.get_status.return_value = BeakerStatus.preempted
for i in range(10):
beaker.resume.return_value = f"foo{i}"
if i == 2:
beaker.get_status.return_value = BeakerStatus.succeeded
resume(self.connection, beaker)
calls = [
call.get_status("foo"),
call.resume("foo"),
call.get_status("foo0"),
call.resume("foo0"),
call.get_status("foo1"),
]
beaker.assert_has_calls(calls)
| allennlp-master | scripts/tests/ai2_internal/resume_daemon_test.py |
#! /usr/bin/env python3
# Tool to automatically resume preemptible beaker experiments created with run_with_beaker.py.
#
# Examples
# --------
#
# Ensure an experiment will be resumed:
# resume_daemon.py --action=start --experiment-id=$YOUR_EXPERIMENT_ID
#
# Stop resuming an experiment:
# resume_daemon.py --action=stop --experiment-id=$YOUR_EXPERIMENT_ID
#
# Details
# -------
#
# In order to operate, resume_daemon.py does the following:
#
# 1. Modifies the user's crontab.
# 2. Maintains a SQLite DB in ~/.allennlp/resume.db.
# 3. Keeps logs in ~/.allennlp/resume.log.
#
# The reliance on crontab means that resumes will only occur when the running
# system is powered on. Longer term Beaker is planning on adding this
# functionality to their service directly, which will obsolete this tool.
import argparse
import json
import logging
import os
import random
import sqlite3
import subprocess
import time
from enum import Enum
from logging.handlers import RotatingFileHandler
from sqlite3 import Connection
from subprocess import PIPE
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
fmt="%(asctime)s %(levelname)-8s %(message)s", datefmt="%Y-%m-%d %H:%M:%S"
)
dot_allennlp_dir = f"{os.environ['HOME']}/.allennlp"
# Special case for users that haven't run AllenNLP locally.
if not os.path.exists(dot_allennlp_dir):
os.mkdir(dot_allennlp_dir)
handler = RotatingFileHandler(
f"{dot_allennlp_dir}/resume.log", maxBytes=1024 * 1024, backupCount=10
)
handler.setFormatter(formatter)
logger.addHandler(handler)
BEAKER_QUERY_INTERVAL_SECONDS = 1.0
# See https://github.com/beaker/client/blob/master/api/task_status.go
class BeakerStatus(Enum):
submitted = "submitted"
provisioning = "provisioning"
initializing = "initializing"
running = "running"
terminating = "terminating"
preempted = "preempted"
succeeded = "succeeded"
skipped = "skipped"
stopped = "stopped"
failed = "failed"
def __str__(self):
return self.name
def is_end_state(self):
if self is BeakerStatus.preempted:
return True
elif self is BeakerStatus.succeeded:
return True
elif self is BeakerStatus.skipped:
return True
elif self is BeakerStatus.stopped:
return True
elif self is BeakerStatus.failed:
return True
else:
return False
class BeakerWrapper:
def get_status(self, experiment_id: str) -> BeakerStatus:
command = ["beaker", "experiment", "inspect", experiment_id]
experiment_json = subprocess.check_output(command)
# Example output from beaker.
# brendanr.local$ beaker experiment inspect ex_g7knlblsjxxk
# [
# {
# "id": "ex_g7knlblsjxxk",
# "owner": {
# "id": "us_a4hw8yvr3xut",
# "name": "ai2",
# "displayName": "AI2"
# },
# "author": {
# "id": "us_hl8x796649u9",
# "name": "brendanr",
# "displayName": "Brendan Roof"
# },
# "workspace": "",
# "user": {
# "id": "",
# "name": "",
# "displayName": ""
# },
# "nodes": [
# {
# "name": "training",
# "task_id": "",
# "taskId": "tk_64wm85lc3f0m",
# "result_id": "",
# "resultId": "ds_du02un92r57b",
# "status": "initializing",
# "child_task_ids": null,
# "childTaskIds": [],
# "parent_task_ids": null,
# "parentTaskIds": []
# }
# ],
# "created": "2019-09-25T02:03:30.820437Z",
# "archived": false
# }
# ]
experiment_data = json.loads(experiment_json)
        # Beaker allows multiple tasks in a single experiment. Here we only handle
        # the simple case of single-task experiments like those created by
        # run_with_beaker.py.
assert len(experiment_data) == 1, "Experiment not created with run_with_beaker.py"
assert (
len(experiment_data[0]["nodes"]) == 1
), "Experiment not created with run_with_beaker.py"
status = BeakerStatus(experiment_data[0]["nodes"][0]["status"])
# Small delay to avoid thrashing Beaker.
time.sleep(BEAKER_QUERY_INTERVAL_SECONDS)
return status
def resume(self, experiment_id: str) -> str:
command = ["beaker", "experiment", "resume", f"--experiment-name={experiment_id}"]
# Small delay to avoid thrashing Beaker.
time.sleep(BEAKER_QUERY_INTERVAL_SECONDS)
return subprocess.check_output(command, universal_newlines=True).strip()
def create_table(connection: Connection) -> None:
cursor = connection.cursor()
create_table_statement = """
CREATE TABLE active_experiments
(experiment_id TEXT PRIMARY KEY, original_id TEXT, max_resumes INTEGER, current_resume INTEGER)
"""
cursor.execute(create_table_statement)
connection.commit()
def start_autoresume(connection: Connection, experiment_id: str, max_resumes: int) -> None:
cursor = connection.cursor()
cursor.execute(
"INSERT INTO active_experiments VALUES (?, ?, ?, ?)",
(experiment_id, experiment_id, max_resumes, 0),
)
connection.commit()
def stop_autoresume(connection: Connection, experiment_id: str) -> None:
cursor = connection.cursor()
cursor.execute("SELECT * FROM active_experiments WHERE experiment_id = ?", (experiment_id,))
result = cursor.fetchall()
assert result, f"Experiment {experiment_id} not found!"
cursor.execute("DELETE FROM active_experiments WHERE experiment_id = ?", (experiment_id,))
connection.commit()
def resume(connection: Connection, beaker: BeakerWrapper) -> None:
logger.info("Checking if resumes are needed.")
cursor = connection.cursor()
cursor.execute("SELECT * FROM active_experiments")
experiments = cursor.fetchall()
for experiment_row in experiments:
experiment_id, original_id, max_resumes, current_resume = experiment_row
status = beaker.get_status(experiment_id)
if status.is_end_state():
stop_autoresume(connection, experiment_id)
if status is BeakerStatus.preempted:
if current_resume >= max_resumes:
logger.info(
f"Experiment {experiment_id} preempted too many times "
f"({max_resumes}). Original experiment: {original_id}"
)
else:
new_experiment_id = beaker.resume(experiment_id)
logger.info(
f"Experiment {experiment_id} preempted "
f"({current_resume}/{max_resumes}). Resuming as: "
f"{new_experiment_id} Original experiment: {original_id}"
)
cursor.execute(
"INSERT INTO active_experiments VALUES (?, ?, ?, ?)",
(new_experiment_id, original_id, max_resumes, current_resume + 1),
)
connection.commit()
else:
logger.info(
f"Experiment {experiment_id} completed with status: "
f"{status}. Original experiment: {original_id}"
)
class Action(Enum):
start = "start"
stop = "stop"
resume = "resume"
def __str__(self):
return self.name
def main(args) -> None:
# Smooth load from potentially many daemons on different machines.
time.sleep(random.randint(0, args.random_delay_seconds))
db_path = f"{dot_allennlp_dir}/resume.db"
connection = sqlite3.connect(db_path)
# Create the DB if needed.
cursor = connection.cursor()
cursor.execute(
"SELECT name FROM sqlite_master WHERE type='table' AND name='active_experiments'"
)
tables = cursor.fetchall()
if not tables:
create_table(connection)
# Modify the crontab if needed.
crontab_l_result = subprocess.run(
["crontab", "-l"], universal_newlines=True, stdout=PIPE, stderr=PIPE
)
if crontab_l_result.returncode == 0:
current_crontab = crontab_l_result.stdout
else:
# `crontab -l` fails when a crontab hasn't been installed previously.
# Sanity check the error message to guard against blowing away the
# crontab in some obscure failure case.
assert "no crontab" in crontab_l_result.stderr, f"crontab failed: {crontab_l_result.stderr}"
current_crontab = ""
full_path = os.path.abspath(__file__)
if full_path not in current_crontab:
# Execute this script every ten minutes. We set the PATH to that used
# to run this install step to make sure that we have access to python3
# and beaker.
cron_line = (
f"*/10 * * * * bash -c 'export PATH={os.environ['PATH']};"
f" python3 {full_path} --action=resume --random-delay-seconds=60'\n"
)
new_crontab = current_crontab + cron_line
subprocess.run(["crontab", "-"], input=new_crontab, encoding="utf-8")
if args.action is Action.start:
assert args.experiment_id
start_autoresume(connection, args.experiment_id, args.max_resumes)
elif args.action is Action.stop:
assert args.experiment_id
stop_autoresume(connection, args.experiment_id)
elif args.action is Action.resume:
beaker = BeakerWrapper()
resume(connection, beaker)
else:
raise Exception(f"Unaccounted for action {args.action}")
connection.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--action", type=Action, choices=list(Action), required=True)
parser.add_argument("--experiment-id", type=str)
parser.add_argument("--max-resumes", type=int, default=10)
parser.add_argument("--random-delay-seconds", type=int, default=0)
args = parser.parse_args()
try:
main(args)
except Exception:
# Ensure traces are logged.
# TODO(brendanr): Is there a better way to do this?
logger.exception("Fatal error")
raise
| allennlp-master | scripts/ai2_internal/resume_daemon.py |
#! /usr/bin/env python
# Script to launch AllenNLP Beaker jobs.
import argparse
import os
import json
import random
import tempfile
import subprocess
import sys
# This has to happen before we import spacy (even indirectly), because for some crazy reason spacy
# thought it was a good idea to set the random seed on import...
random_int = random.randint(0, 2 ** 32)
sys.path.insert(
0, os.path.dirname(os.path.abspath(os.path.join(os.path.join(__file__, os.pardir), os.pardir)))
)
from allennlp.common.params import Params
def main(param_file: str, args: argparse.Namespace):
commit = subprocess.check_output(["git", "rev-parse", "HEAD"], universal_newlines=True).strip()
docker_image = f"allennlp/allennlp:{commit}"
overrides = args.overrides
# Reads params and sets environment.
ext_vars = {}
for var in args.env:
key, value = var.split("=")
ext_vars[key] = value
params = Params.from_file(param_file, overrides, ext_vars)
# Write params as json. Otherwise Jsonnet's import feature breaks.
params_dir = tempfile.mkdtemp(prefix="config")
compiled_params_path = os.path.join(params_dir, "config.json")
params.to_file(compiled_params_path)
print(f"Compiled jsonnet config written to {compiled_params_path}.")
flat_params = params.as_flat_dict()
env = {}
for k, v in flat_params.items():
k = str(k).replace(".", "_")
env[k] = str(v)
# If the git repository is dirty, add a random hash.
result = subprocess.run("git diff-index --quiet HEAD --", shell=True)
if result.returncode != 0:
dirty_hash = "%x" % random_int
docker_image += "-" + dirty_hash
if args.image:
image = args.image
print(f"Using the specified image: {image}")
else:
print(f"Building the Docker image ({docker_image})...")
subprocess.run(f"docker build -t {docker_image} .", shell=True, check=True)
print("Create a Beaker image...")
image = subprocess.check_output(
f"beaker image create --quiet {docker_image}", shell=True, universal_newlines=True
).strip()
print(f" Image created: {docker_image}")
config_dataset_id = subprocess.check_output(
f"beaker dataset create --quiet {params_dir}/*", shell=True, universal_newlines=True
).strip()
# Arguments that differ between preemptible and regular machine execution.
if args.preemptible:
allennlp_prefix = ["/stage/allennlp/resumable_train.sh", "/output", "/config/config.json"]
else:
allennlp_prefix = [
"python",
"-m",
"allennlp.run",
"train",
"/config/config.json",
"-s",
"/output",
]
# All other arguments
allennlp_suffix = ["--file-friendly-logging"]
for package_name in args.include_package:
allennlp_suffix.append("--include-package")
allennlp_suffix.append(package_name)
allennlp_command = allennlp_prefix + allennlp_suffix
dataset_mounts = []
for source in args.source + [f"{config_dataset_id}:/config"]:
datasetId, containerPath = source.split(":")
dataset_mounts.append({"datasetId": datasetId, "containerPath": containerPath})
for var in args.env:
key, value = var.split("=")
env[key] = value
requirements = {}
if args.cpu:
requirements["cpu"] = float(args.cpu)
if args.memory:
requirements["memory"] = args.memory
if args.gpu_count:
requirements["gpuCount"] = int(args.gpu_count)
if args.preemptible:
requirements["preemptible"] = True
config_spec = {
"description": args.desc,
"image": image,
"resultPath": "/output",
"args": allennlp_command,
"datasetMounts": dataset_mounts,
"requirements": requirements,
"env": env,
}
config_task = {"spec": config_spec, "name": "training"}
config = {"tasks": [config_task]}
output_path = (
args.spec_output_path
if args.spec_output_path
else tempfile.mkstemp(".yaml", "beaker-config-")[1]
)
with open(output_path, "w") as output:
output.write(json.dumps(config, indent=4))
print(f"Beaker spec written to {output_path}.")
experiment_command = ["beaker", "experiment", "create", "--quiet", "--file", output_path]
if args.name:
experiment_command.append("--name")
experiment_command.append(args.name.replace(" ", "-"))
def resume_command(experiment_id):
resume_daemon_path = os.path.join(os.path.dirname(__file__), "resume_daemon.py")
return [
# Run with python (instead of calling directly) in case the
# executable bit wasn't preserved for some reason.
"python3",
resume_daemon_path,
"--action=start",
f"--max-resumes={args.max_resumes}",
f"--experiment-id={experiment_id}",
]
if args.dry_run:
print("This is a dry run (--dry-run). Launch your job with the following command:")
print(" " + " ".join(experiment_command))
if args.max_resumes > 0:
print("Configure auto-resumes with the following command:")
print(" " + " ".join(resume_command("$YOUR_EXPERIMENT_ID")))
else:
print("Running the experiment:")
print(" " + " ".join(experiment_command))
experiment_id = subprocess.check_output(experiment_command, universal_newlines=True).strip()
print(
f"Experiment {experiment_id} submitted. "
f"See progress at https://beaker.org/ex/{experiment_id}"
)
if args.max_resumes > 0:
print("Configuring auto-resumes:")
print(" " + " ".join(resume_command(experiment_id)))
subprocess.run(resume_command(experiment_id))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("param_file", type=str, help="The model configuration file.")
parser.add_argument("--name", type=str, help="A name for the experiment.")
parser.add_argument(
"--spec_output_path", type=str, help="The destination to write the experiment spec."
)
parser.add_argument(
"--dry-run", action="store_true", help="If specified, an experiment will not be created."
)
parser.add_argument(
"--image", type=str, help="The image to use (if unspecified one will be built)"
)
parser.add_argument("--desc", type=str, help="A description for the experiment.")
parser.add_argument(
"--env",
action="append",
default=[],
help="Set environment variables (e.g. NAME=value or NAME)",
)
parser.add_argument(
"--source",
action="append",
default=[],
help="Bind a remote data source (e.g. source-id:/target/path)",
)
parser.add_argument("--cpu", help="CPUs to reserve for this experiment (e.g., 0.5)")
parser.add_argument(
"--gpu-count", default=1, help="GPUs to use for this experiment (e.g., 1 (default))"
)
parser.add_argument("--memory", help="Memory to reserve for this experiment (e.g., 1GB)")
parser.add_argument(
"--preemptible", action="store_true", help="Allow task to run on preemptible hardware"
)
parser.add_argument(
"--max-resumes",
type=int,
default=0,
help="When running with --preemptible, use a cronjob to automatically resume this many times.",
)
parser.add_argument(
"--include-package",
type=str,
action="append",
default=[],
help="Additional packages to include",
)
parser.add_argument(
"-o",
"--overrides",
type=str,
default="",
help="a JSON structure used to override the experiment configuration",
)
args = parser.parse_args()
if args.max_resumes > 0:
assert args.preemptible, "--max-resumes requires --preemptible!"
main(args.param_file, args)
| allennlp-master | scripts/ai2_internal/run_with_beaker.py |
from setuptools import setup, find_packages
import sys
import os
# PEP0440 compatible formatted version, see:
# https://www.python.org/dev/peps/pep-0440/
#
# release markers:
# X.Y
# X.Y.Z # For bugfix releases
#
# pre-release markers:
# X.YaN # Alpha release
# X.YbN # Beta release
# X.YrcN # Release Candidate
# X.Y # Final release
# version.py defines the VERSION and VERSION_SHORT variables.
# We use exec here so we don't import allennlp_hub whilst setting up.
VERSION = {}
with open("allennlp_hub/version.py") as version_file:
exec(version_file.read(), VERSION)
install_requirements = []
if not os.environ.get("EXCLUDE_ALLENNLP_IN_SETUP"):
# Warning: This will not give you the desired version if you've already
# installed allennlp! See https://github.com/pypa/pip/issues/5898.
#
# There used to be an alternative to this using `dependency_links`
# (https://stackoverflow.com/questions/3472430), but pip decided to
# remove this in version 19 breaking numerous projects in the process.
# See https://github.com/pypa/pip/issues/6162.
#
# As a mitigation, run `pip uninstall allennlp` before installing this
# package.
#
# TODO(brendanr): Make these point to released versions, when possible, by
# loading requirements.txt. Currently allennlp-semparse is unreleased and
# it depends on a specific allennlp SHA. Due to the aforementioned
# setuptools bug, we explicitly set the allennlp version here to be that
# required by allennlp-semparse.
allennlp_sha = "65ff0d87a30e6532cb21ea5fe8b7bd436445c128"
semparse_sha = "339e617861a7616618a503cc98e1e9c8b28a1b06"
reading_comprehension_sha = "e4f8e5df4f9fa35287d44e94fc8b26b9cabed0a5"
install_requirements = [
f"allennlp @ git+https://github.com/allenai/allennlp@{allennlp_sha}#egg=allennlp",
f"allennlp_semparse @ git+https://github.com/allenai/allennlp-semparse@{semparse_sha}#egg=allennlp-semparse",
f"allennlp_reading_comprehension @ git+https://github.com/allenai/allennlp-reading-comprehension@{reading_comprehension_sha}#egg=allennlp-reading-comprehension",
]
# make pytest-runner a conditional requirement,
# per: https://github.com/pytest-dev/pytest-runner#considerations
needs_pytest = {"pytest", "test", "ptr"}.intersection(sys.argv)
pytest_runner = ["pytest-runner"] if needs_pytest else []
setup_requirements = [
# add other setup requirements as necessary
] + pytest_runner
setup(
name="allennlp_hub",
version=VERSION["VERSION"],
description="A collection of selected of models built with AllenNLP.",
long_description=open("README.md").read(),
long_description_content_type="text/markdown",
classifiers=[
"Intended Audience :: Science/Research",
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: Python :: 3.6",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="allennlp NLP deep learning machine reading",
url="https://github.com/allenai/allennlp-hub",
author="Allen Institute for Artificial Intelligence",
author_email="[email protected]",
license="Apache",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
install_requires=install_requirements,
setup_requires=setup_requirements,
tests_require=["pytest", "flaky", "responses>=0.7"],
include_package_data=True,
python_requires=">=3.6.1",
zip_safe=False,
)
| allennlp-hub-master | setup.py |
import pytest
import spacy
from allennlp.common.testing import AllenNlpTestCase
from allennlp_hub import pretrained
class AllenNlpSemparsePretrainedTest(AllenNlpTestCase):
def test_wikitables_parser(self):
predictor = pretrained.wikitables_parser_dasigi_2019()
table = """# Event Year Season Flag bearer
7 2012 Summer Ele Opeloge
6 2008 Summer Ele Opeloge
5 2004 Summer Uati Maposua
4 2000 Summer Pauga Lalau
3 1996 Summer Bob Gasio
2 1988 Summer Henry Smith
1 1984 Summer Apelu Ioane"""
question = "How many years were held in summer?"
result = predictor.predict_json({"table": table, "question": question})
assert result["answer"] == 7
assert (
result["logical_form"][0]
== "(count (filter_in all_rows string_column:season string:summer))"
)
def test_nlvr_parser(self):
predictor = pretrained.nlvr_parser_dasigi_2019()
structured_rep = """[
[
{"y_loc":13,"type":"square","color":"Yellow","x_loc":13,"size":20},
{"y_loc":20,"type":"triangle","color":"Yellow","x_loc":44,"size":30},
{"y_loc":90,"type":"circle","color":"#0099ff","x_loc":52,"size":10}
],
[
{"y_loc":57,"type":"square","color":"Black","x_loc":17,"size":20},
{"y_loc":30,"type":"circle","color":"#0099ff","x_loc":76,"size":10},
{"y_loc":12,"type":"square","color":"Black","x_loc":35,"size":10}
],
[
{"y_loc":40,"type":"triangle","color":"#0099ff","x_loc":26,"size":20},
{"y_loc":70,"type":"triangle","color":"Black","x_loc":70,"size":30},
{"y_loc":19,"type":"square","color":"Black","x_loc":35,"size":10}
]
]"""
sentence = "there is exactly one yellow object touching the edge"
result = predictor.predict_json(
{"structured_rep": structured_rep, "sentence": sentence}
)
assert result["denotations"][0] == ["False"]
assert (
result["logical_form"][0]
== "(object_count_equals (yellow (touch_wall all_objects)) 1)"
)
def test_atis_parser(self):
predictor = pretrained.atis_parser_lin_2019()
utterance = "give me flights on american airlines from milwaukee to phoenix"
result = predictor.predict_json({"utterance": utterance})
predicted_sql_query = """
(SELECT DISTINCT flight . flight_id
FROM flight
WHERE (flight . airline_code = 'AA'
AND (flight . from_airport IN
(SELECT airport_service . airport_code
FROM airport_service
WHERE airport_service . city_code IN
(SELECT city . city_code
FROM city
WHERE city . city_name = 'MILWAUKEE' ) )
AND flight . to_airport IN
(SELECT airport_service . airport_code
FROM airport_service
WHERE airport_service . city_code IN
(SELECT city . city_code
FROM city
WHERE city . city_name = 'PHOENIX' ) ))) ) ;"""
assert result["predicted_sql_query"] == predicted_sql_query
| allennlp-hub-master | tests/pretrained/allennlp_semparse_pretrained_test.py |
import pytest
import spacy
from allennlp.common.testing import AllenNlpTestCase
from allennlp_hub import pretrained
class AllenNlpPretrainedTest(AllenNlpTestCase):
def test_machine_comprehension(self):
predictor = pretrained.bidirectional_attention_flow_seo_2017()
passage = """The Matrix is a 1999 science fiction action film written and directed by The Wachowskis, starring Keanu Reeves, Laurence Fishburne, Carrie-Anne Moss, Hugo Weaving, and Joe Pantoliano. It depicts a dystopian future in which reality as perceived by most humans is actually a simulated reality called "the Matrix", created by sentient machines to subdue the human population, while their bodies' heat and electrical activity are used as an energy source. Computer programmer Neo" learns this truth and is drawn into a rebellion against the machines, which involves other people who have been freed from the "dream world". """
question = "Who stars in The Matrix?"
result = predictor.predict_json({"passage": passage, "question": question})
correct = "Keanu Reeves, Laurence Fishburne, Carrie-Anne Moss, Hugo Weaving, and Joe Pantoliano"
assert correct == result["best_span_str"]
def test_semantic_role_labeling(self):
predictor = pretrained.srl_with_elmo_luheng_2018()
sentence = "If you liked the music we were playing last night, you will absolutely love what we're playing tomorrow!"
result = predictor.predict_json({"sentence": sentence})
assert result["words"] == [
"If",
"you",
"liked",
"the",
"music",
"we",
"were",
"playing",
"last",
"night",
",",
"you",
"will",
"absolutely",
"love",
"what",
"we",
"'re",
"playing",
"tomorrow",
"!",
]
assert result["verbs"] == [
{
"verb": "liked",
"description": "If [ARG0: you] [V: liked] [ARG1: the music we were playing last night] , you will absolutely love what we 're playing tomorrow !",
"tags": [
"O",
"B-ARG0",
"B-V",
"B-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
],
},
{
"verb": "playing",
"description": "If you liked [ARG1: the music] [ARG0: we] were [V: playing] [ARGM-TMP: last] night , you will absolutely love what we 're playing tomorrow !",
"tags": [
"O",
"O",
"O",
"B-ARG1",
"I-ARG1",
"B-ARG0",
"O",
"B-V",
"B-ARGM-TMP",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
],
},
{
"verb": "will",
"description": "If you liked the music we were playing last night , you [V: will] absolutely love what we 're playing tomorrow !",
"tags": [
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"B-V",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
],
},
{
"verb": "love",
"description": "[ARGM-ADV: If you liked the music we were playing last night] , [ARG0: you] [ARGM-MOD: will] [ARGM-ADV: absolutely] [V: love] [ARG1: what we 're playing tomorrow] !",
"tags": [
"B-ARGM-ADV",
"I-ARGM-ADV",
"I-ARGM-ADV",
"I-ARGM-ADV",
"I-ARGM-ADV",
"I-ARGM-ADV",
"I-ARGM-ADV",
"I-ARGM-ADV",
"I-ARGM-ADV",
"I-ARGM-ADV",
"O",
"B-ARG0",
"B-ARGM-MOD",
"B-ARGM-ADV",
"B-V",
"B-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"I-ARG1",
"O",
],
},
{
"verb": "playing",
"description": "If you liked the music we were playing last night , you will absolutely love [ARG1: what] [ARG0: we] 're [V: playing] [ARGM-TMP: tomorrow] !",
"tags": [
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"O",
"B-ARG1",
"B-ARG0",
"O",
"B-V",
"B-ARGM-TMP",
"O",
],
},
]
def test_textual_entailment(self):
predictor = pretrained.decomposable_attention_with_elmo_parikh_2017()
result = predictor.predict_json(
{
"premise": "An interplanetary spacecraft is in orbit around a gas giant's icy moon.",
"hypothesis": "The spacecraft has the ability to travel between planets.",
}
)
assert result["label_probs"][0] > 0.7 # entailment
result = predictor.predict_json(
{
"premise": "Two women are wandering along the shore drinking iced tea.",
"hypothesis": "Two women are sitting on a blanket near some rocks talking about politics.",
}
)
assert result["label_probs"][1] > 0.8 # contradiction
result = predictor.predict_json(
{
"premise": "A large, gray elephant walked beside a herd of zebras.",
"hypothesis": "The elephant was lost.",
}
)
assert result["label_probs"][2] > 0.6 # neutral
def test_coreference_resolution(self):
predictor = pretrained.neural_coreference_resolution_lee_2017()
document = "We 're not going to skimp on quality , but we are very focused to make next year . The only problem is that some of the fabrics are wearing out - since I was a newbie I skimped on some of the fabric and the poor quality ones are developing holes ."
result = predictor.predict_json({"document": document})
print(result)
assert result["clusters"] == [
[[0, 0], [10, 10]],
[[33, 33], [37, 37]],
[[26, 27], [42, 43]],
]
assert result["document"] == [
"We",
"'re",
"not",
"going",
"to",
"skimp",
"on",
"quality",
",",
"but",
"we",
"are",
"very",
"focused",
"to",
"make",
"next",
"year",
".",
"The",
"only",
"problem",
"is",
"that",
"some",
"of",
"the",
"fabrics",
"are",
"wearing",
"out",
"-",
"since",
"I",
"was",
"a",
"newbie",
"I",
"skimped",
"on",
"some",
"of",
"the",
"fabric",
"and",
"the",
"poor",
"quality",
"ones",
"are",
"developing",
"holes",
".",
]
def test_ner(self):
predictor = pretrained.named_entity_recognition_with_elmo_peters_2018()
sentence = """Michael Jordan is a professor at Berkeley."""
result = predictor.predict_json({"sentence": sentence})
assert result["words"] == [
"Michael",
"Jordan",
"is",
"a",
"professor",
"at",
"Berkeley",
".",
]
assert result["tags"] == ["B-PER", "L-PER", "O", "O", "O", "O", "U-LOC", "O"]
@pytest.mark.skipif(
spacy.__version__ < "2.1", reason="this model changed from 2.0 to 2.1"
)
def test_constituency_parsing(self):
predictor = pretrained.span_based_constituency_parsing_with_elmo_joshi_2018()
sentence = """Pierre Vinken died aged 81; immortalised aged 61."""
result = predictor.predict_json({"sentence": sentence})
assert result["tokens"] == [
"Pierre",
"Vinken",
"died",
"aged",
"81",
";",
"immortalised",
"aged",
"61",
".",
]
assert (
result["trees"]
== "(S (NP (NNP Pierre) (NNP Vinken)) (VP (VP (VBD died) (NP (JJ aged) (CD 81))) (, ;) (VP (VBN immortalised) (S (ADJP (JJ aged) (CD 61))))) (. .))"
)
def test_dependency_parsing(self):
predictor = pretrained.biaffine_parser_stanford_dependencies_todzat_2017()
sentence = """He ate spaghetti with chopsticks."""
result = predictor.predict_json({"sentence": sentence})
# Note that this tree is incorrect. We are checking here that the decoded
# tree is _actually a tree_ - in greedy decoding versions of the dependency
# parser, this sentence has multiple heads. This test shouldn't really live here,
# but it's very difficult to re-create a concrete example of this behaviour without
# a trained dependency parser.
assert result["words"] == ["He", "ate", "spaghetti", "with", "chopsticks", "."]
assert result["pos"] == ["PRP", "VBD", "NNS", "IN", "NNS", "."]
assert result["predicted_dependencies"] == [
"nsubj",
"root",
"dobj",
"prep",
"pobj",
"punct",
]
assert result["predicted_heads"] == [2, 0, 2, 2, 4, 2]
def test_openie(self):
predictor = pretrained.open_information_extraction_stanovsky_2018()
result = predictor.predict_json({"sentence": "I'm against picketing, but I don't know how to show it."})
assert "verbs" in result
assert "words" in result
| allennlp-hub-master | tests/pretrained/allennlp_pretrained_test.py |
_MAJOR = "0"
_MINOR = "0"
_REVISION = "1-unreleased"
VERSION_SHORT = "{0}.{1}".format(_MAJOR, _MINOR)
VERSION = "{0}.{1}.{2}".format(_MAJOR, _MINOR, _REVISION)
| allennlp-hub-master | allennlp_hub/version.py |
allennlp-hub-master | allennlp_hub/__init__.py |
|
from allennlp_hub.pretrained.helpers import _load_predictor
from allennlp_semparse import predictors as semparse_predictors
import allennlp_semparse.models
import allennlp_semparse.dataset_readers
# AllenNLP Semparse models
def wikitables_parser_dasigi_2019() -> semparse_predictors.WikiTablesParserPredictor:
predictor = _load_predictor(
"https://allennlp.s3.amazonaws.com/models/wikitables-model-2020.02.10.tar.gz",
"wikitables-parser",
)
return predictor
def nlvr_parser_dasigi_2019() -> semparse_predictors.NlvrParserPredictor:
predictor = _load_predictor(
"https://allennlp.s3.amazonaws.com/models/nlvr-erm-model-2020.02.10-rule-vocabulary-updated.tar.gz",
"nlvr-parser",
)
return predictor
def atis_parser_lin_2019() -> semparse_predictors.AtisParserPredictor:
predictor = _load_predictor(
"https://allennlp.s3.amazonaws.com/models/atis-parser-2020.02.10.tar.gz",
"atis-parser",
)
return predictor
| allennlp-hub-master | allennlp_hub/pretrained/allennlp_semparse_pretrained.py |
from allennlp_hub.pretrained.allennlp_pretrained import *
from allennlp_hub.pretrained.allennlp_semparse_pretrained import *
| allennlp-hub-master | allennlp_hub/pretrained/__init__.py |
from allennlp import predictors
from allennlp_rc import predictors as rc_predictors
from allennlp_hub.pretrained.helpers import _load_predictor
import allennlp.models
# Models in the main repo
def srl_with_elmo_luheng_2018() -> predictors.SemanticRoleLabelerPredictor:
"""
Semantic Role Labeling
Based on [He et al, 2017](https://www.semanticscholar.org/paper/Deep-Semantic-Role-Labeling-What-Works-and-What-s-He-Lee/a3ccff7ad63c2805078b34b8514fa9eab80d38e9)
f1: 0.849
"""
predictor = _load_predictor(
"https://allennlp.s3.amazonaws.com/models/srl-model-2020.02.10.tar.gz",
"semantic-role-labeling",
)
return predictor
def bert_srl_shi_2019() -> predictors.SemanticRoleLabelerPredictor:
predictor = _load_predictor(
"https://s3-us-west-2.amazonaws.com/allennlp/models/bert-base-srl-2020.02.10.tar.gz",
"semantic-role-labeling",
)
return predictor
def bidirectional_attention_flow_seo_2017() -> rc_predictors.ReadingComprehensionPredictor:
"""
Reading Comprehension
Based on `BiDAF (Seo et al, 2017) <https://www.semanticscholar.org/paper/Bidirectional-Attention-Flow-for-Machine-Comprehen-Seo-Kembhavi/007ab5528b3bd310a80d553cccad4b78dc496b02>`_
.. code-block:: bash
$ docker run allennlp/allennlp:v0.7.0
evaluate
https://allennlp.s3.amazonaws.com/models/bidaf-model-2020.02.10-charpad.tar.gz
https://allennlp.s3.amazonaws.com/datasets/squad/squad-dev-v1.1.json
Metrics:
* start_acc: 0.642
* end_acc: 0.671
* span_acc: 0.552
* em: 0.683
* f1: 0.778
"""
predictor = _load_predictor(
"https://allennlp.s3.amazonaws.com/models/bidaf-model-2020.02.10-charpad.tar.gz",
"reading-comprehension",
)
return predictor
def naqanet_dua_2019() -> rc_predictors.ReadingComprehensionPredictor:
predictor = _load_predictor(
"https://allennlp.s3.amazonaws.com/models/naqanet-2020.02.10-fixed-weight-names.tar.gz",
"reading-comprehension",
)
return predictor
def open_information_extraction_stanovsky_2018() -> predictors.OpenIePredictor:
predictor = _load_predictor(
"https://allennlp.s3.amazonaws.com/models/openie-model.2020.02.10.tar.gz",
"open-information-extraction",
)
return predictor
def decomposable_attention_with_elmo_parikh_2017() -> predictors.DecomposableAttentionPredictor:
"""
Textual Entailment
Based on `Parikh et al, 2017 <https://www.semanticscholar.org/paper/A-Decomposable-Attention-Model-for-Natural-Languag-Parikh-T%C3%A4ckstr%C3%B6m/07a9478e87a8304fc3267fa16e83e9f3bbd98b27>`_
.. code-block:: bash
$ docker run allennlp/allennlp:v0.7.0
evaluate
https://allennlp.s3.amazonaws.com/models/decomposable-attention-elmo-2020.02.10.tar.gz
https://allennlp.s3.amazonaws.com/datasets/snli/snli_1.0_test.jsonl
Metrics:
accuracy: 0.864
"""
predictor = _load_predictor(
"https://allennlp.s3.amazonaws.com/models/decomposable-attention-elmo-2020.02.10.tar.gz",
"textual-entailment",
)
return predictor
def neural_coreference_resolution_lee_2017() -> predictors.CorefPredictor:
"""
Coreference Resolution
Based on `End-to-End Coreference Resolution (Lee et al, 2017) <https://www.semanticscholar.org/paper/End-to-end-Neural-Coreference-Resolution-Lee-He/3f2114893dc44eacac951f148fbff142ca200e83>`_
f1: 0.630
"""
predictor = _load_predictor(
"https://allennlp.s3.amazonaws.com/models/coref-model-2020.02.10.tar.gz",
"coreference-resolution",
)
predictor._dataset_reader._token_indexers[
"token_characters"
]._min_padding_length = 5
return predictor
def named_entity_recognition_with_elmo_peters_2018() -> predictors.SentenceTaggerPredictor:
"""
Named Entity Recognition
Based on `Deep contextualized word representations <https://arxiv.org/abs/1802.05365>`_
"""
predictor = _load_predictor(
"https://allennlp.s3.amazonaws.com/models/ner-model-2020.02.10.tar.gz",
"sentence-tagger",
)
predictor._dataset_reader._token_indexers[
"token_characters"
]._min_padding_length = 3
return predictor
def fine_grained_named_entity_recognition_with_elmo_peters_2018() -> predictors.SentenceTaggerPredictor:
"""
Fine Grained Named Entity Recognition
"""
predictor = _load_predictor(
"https://allennlp.s3.amazonaws.com/models/fine-grained-ner-model-elmo-2020.02.10.tar.gz",
"sentence-tagger",
)
predictor._dataset_reader._token_indexers[
"token_characters"
]._min_padding_length = 3
return predictor
def span_based_constituency_parsing_with_elmo_joshi_2018() -> predictors.ConstituencyParserPredictor:
"""
Constituency Parsing
Based on `Minimal Span Based Constituency Parser (Stern et al, 2017) <https://www.semanticscholar.org/paper/A-Minimal-Span-Based-Neural-Constituency-Parser-Stern-Andreas/593e4e749bd2dbcaf8dc25298d830b41d435e435>`_ but with ELMo embeddings
"""
predictor = _load_predictor(
"https://allennlp.s3.amazonaws.com/models/elmo-constituency-parser-2020.02.10.tar.gz",
"constituency-parser",
)
return predictor
def biaffine_parser_stanford_dependencies_todzat_2017() -> predictors.BiaffineDependencyParserPredictor:
"""
Biaffine Dependency Parser (Stanford Dependencies)
Based on `Dozat and Manning, 2017 <https://arxiv.org/pdf/1611.01734.pdf>`_
"""
predictor = _load_predictor(
"https://allennlp.s3.amazonaws.com/models/biaffine-dependency-parser-ptb-2020.02.10.tar.gz",
"biaffine-dependency-parser",
)
return predictor
def biaffine_parser_universal_dependencies_todzat_2017() -> predictors.BiaffineDependencyParserPredictor:
"""
Biaffine Dependency Parser (Universal Dependencies)
Based on `Dozat and Manning, 2017 <https://arxiv.org/pdf/1611.01734.pdf>`_
"""
predictor = _load_predictor(
"https://allennlp.s3.amazonaws.com/models/biaffine-dependency-parser-ud-2020.02.10.tar.gz",
"biaffine-dependency-parser",
)
return predictor
def esim_nli_with_elmo_chen_2017() -> predictors.DecomposableAttentionPredictor:
"""
ESIM
Based on `Enhanced LSTM for Natural Language Inference <https://arxiv.org/pdf/1609.06038.pdf>`_ and uses ELMo
"""
predictor = _load_predictor(
"https://allennlp.s3.amazonaws.com/models/esim-elmo-2020.02.10.tar.gz",
"textual-entailment",
)
return predictor
| allennlp-hub-master | allennlp_hub/pretrained/allennlp_pretrained.py |
from allennlp.predictors import Predictor
from allennlp.models.archival import load_archive
def _load_predictor(archive_file: str, predictor_name: str) -> Predictor:
"""
Helper to load the desired predictor from the given archive.
"""
archive = load_archive(archive_file)
return Predictor.from_archive(archive, predictor_name)
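# Illustrative usage (a sketch, not executed here): the public functions in
# allennlp_pretrained.py and allennlp_semparse_pretrained.py call this helper with a
# model archive URL and a registered predictor name, e.g.
#   predictor = _load_predictor(
#       "https://allennlp.s3.amazonaws.com/models/bidaf-model-2020.02.10-charpad.tar.gz",
#       "reading-comprehension",
#   )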
| allennlp-hub-master | allennlp_hub/pretrained/helpers.py |
import ai2thor.controller
import ai2thor.robot_controller
import random
import time
import numpy as np
from pprint import pprint
fps = ["FloorPlan311"]
runs = [
{"id": "unity", "port": 8200, "controller": ai2thor.controller.Controller()},
{"id": "robot", "port": 9200, "controller": ai2thor.robot_controller.Controller()}
# {'id': 'robot', 'port': 9000, 'controller': ai2thor.robot_controller.Controller()}
]
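# The loop below exercises each controller in turn: it connects to the given port,
# resets every floor plan listed in `fps`, issues a RotateLeft action, and prints the
# resulting event metadata so the Unity simulator and the robot backend can be compared.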
for run_config in runs:
port = run_config["port"]
controller = run_config["controller"]
event = controller.start(start_unity=False, host="127.0.0.1", port=port)
# event = controller.step({'action': 'ChangeQuality', 'quality': 'High'})
# event = controller.step({"action": "ChangeResolution", "x": 300, "y": 300})
for fp in fps:
print(fp)
for i in range(1):
event = controller.reset(fp)
# event = controller.step(dict(action='Initialize', gridSize=0.25, fieldOfView=90, renderInstanceSegmentation=True))
# event = controller.step(dict(action='InitialRandomSpawn', forceVisible=True, maxNumRepeats=10, randomSeed=1))
# event = controller.step(dict(action='MoveAhead', noise=0.02))
event = controller.step(dict(action="RotateLeft"))
print("event for '{}':".format(run_config["id"]))
pprint(event.metadata)
time.sleep(1)
| ai2thor-main | test_controllers.py |
import os
import signal
import sys
if os.name != "nt":
import fcntl
import datetime
import json
import re
import time
import zipfile
import threading
import hashlib
import shutil
import subprocess
import pprint
import random
from typing import Dict
from invoke import task
import boto3
import botocore.exceptions
import multiprocessing
import io
import ai2thor.build
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s [%(process)d] %(funcName)s - %(levelname)s - %(message)s"
)
handler.setFormatter(formatter)
logger.addHandler(handler)
content_types = {
".js": "application/javascript; charset=utf-8",
".html": "text/html; charset=utf-8",
".ico": "image/x-icon",
".svg": "image/svg+xml; charset=utf-8",
".css": "text/css; charset=utf-8",
".png": "image/png",
".txt": "text/plain",
".jpg": "image/jpeg",
".wasm": "application/wasm",
".data": "application/octet-stream",
".unityweb": "application/octet-stream",
".json": "application/json",
}
class ForcedFailure(Exception):
pass
def add_files(zipf, start_dir, exclude_ext=()):
for root, dirs, files in os.walk(start_dir):
for f in files:
fn = os.path.join(root, f)
if any(map(lambda ext: fn.endswith(ext), exclude_ext)):
# print("skipping file %s" % fn)
continue
arcname = os.path.relpath(fn, start_dir)
if arcname.split("/")[0].endswith(
"_BackUpThisFolder_ButDontShipItWithYourGame"
):
# print("skipping %s" % arcname)
continue
# print("adding %s" % arcname)
zipf.write(fn, arcname)
def push_build(build_archive_name, zip_data, include_private_scenes):
logger.info("start of push_build")
import boto3
from base64 import b64encode
# subprocess.run("ls %s" % build_archive_name, shell=True)
# subprocess.run("gsha256sum %s" % build_archive_name)
logger.info("boto3 resource")
s3 = boto3.resource("s3", region_name="us-west-2")
acl = "public-read"
bucket = ai2thor.build.PUBLIC_S3_BUCKET
if include_private_scenes:
bucket = ai2thor.build.PRIVATE_S3_BUCKET
acl = "private"
logger.info("archive base")
archive_base = os.path.basename(build_archive_name)
key = "builds/%s" % (archive_base,)
sha256_key = "builds/%s.sha256" % (os.path.splitext(archive_base)[0],)
logger.info("hashlib sha256")
sha = hashlib.sha256(zip_data)
try:
logger.info("pushing build %s" % (key,))
s3.Object(bucket, key).put(
Body=zip_data,
ACL=acl,
ChecksumSHA256=b64encode(sha.digest()).decode("ascii"),
)
logger.info("pushing sha256 %s" % (sha256_key,))
s3.Object(bucket, sha256_key).put(
Body=sha.hexdigest(), ACL=acl, ContentType="text/plain"
)
except botocore.exceptions.ClientError as e:
logger.error("caught error uploading archive %s: %s" % (build_archive_name, e))
logger.info("pushed build %s to %s" % (bucket, build_archive_name))
def _webgl_local_build_path(prefix, source_dir="builds"):
return os.path.join(
os.getcwd(), "unity/{}/thor-{}-WebGL/".format(source_dir, prefix)
)
def _unity_version():
import yaml
with open("unity/ProjectSettings/ProjectVersion.txt") as pf:
project_version = yaml.load(pf.read(), Loader=yaml.FullLoader)
return project_version["m_EditorVersion"]
def _unity_path():
unity_version = _unity_version()
standalone_path = None
if sys.platform.startswith("darwin"):
unity_hub_path = (
"/Applications/Unity/Hub/Editor/{}/Unity.app/Contents/MacOS/Unity".format(
unity_version
)
)
# /Applications/Unity/2019.4.20f1/Unity.app/Contents/MacOS
standalone_path = (
"/Applications/Unity/{}/Unity.app/Contents/MacOS/Unity".format(
unity_version
)
)
# standalone_path = (
# "/Applications/Unity-{}/Unity.app/Contents/MacOS/Unity".format(
# unity_version
# )
# )
elif "win" in sys.platform:
unity_hub_path = "C:/PROGRA~1/Unity/Hub/Editor/{}/Editor/Unity.exe".format(
unity_version
)
# TODO: Verify windows unity standalone path
standalone_path = "C:/PROGRA~1/{}/Editor/Unity.exe".format(unity_version)
elif sys.platform.startswith("linux"):
unity_hub_path = "{}/Unity/Hub/Editor/{}/Editor/Unity".format(
os.environ["HOME"], unity_version
)
if standalone_path and os.path.exists(standalone_path):
unity_path = standalone_path
else:
unity_path = unity_hub_path
return unity_path
def _build(
unity_path: str,
arch: str,
build_dir: str,
build_name: str,
env: Dict[str, str],
timeout: int = 3600,
print_interval: int = 60,
):
project_path = os.path.join(os.getcwd(), unity_path)
build_target_map = dict(OSXIntel64="OSXUniversal")
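    # Launch the Unity editor headlessly: batch mode with no GUI, the editor log written
    # to <build_name>.log in the current working directory, the project opened from
    # project_path, the requested build target selected, and the project's static
    # Build.<arch> method executed to produce the player.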
command = (
f"{_unity_path()}"
f" -quit"
f" -batchmode"
f" -logFile {os.getcwd()}/{build_name}.log"
f" -projectpath {project_path}"
f" -buildTarget {build_target_map.get(arch, arch)}"
f" -executeMethod Build.{arch}"
)
target_path = os.path.join(build_dir, build_name)
full_env = os.environ.copy()
full_env.update(env)
full_env["UNITY_BUILD_NAME"] = target_path
process = subprocess.Popen(command, shell=True, env=full_env)
start = time.time()
while True:
time.sleep(10) # Check for build completion every 10 seconds
if process.poll() is not None: # Process has finished.
break
elapsed = time.time() - start
if elapsed > timeout:
logger.error(
f"Timeout occurred when running command:\n{command}\nKilling the process."
)
os.kill(process.pid, signal.SIGKILL)
os.waitpid(-1, os.WNOHANG)
return False
if elapsed // print_interval > (elapsed - print_interval) // print_interval:
print(
f"{print_interval}-second interval reached. Process is still running."
)
logger.info(f"Exited with code {process.returncode}")
success = process.returncode == 0
if success:
generate_build_metadata(os.path.join(project_path, build_dir, "metadata.json"))
else:
logger.error(f"Error occurred when running command:\n{command}")
return success
def generate_build_metadata(metadata_path: str):
# this server_types metadata is maintained
# to allow future versions of the Python API
# to launch older versions of the Unity build
# and know whether the Fifo server is available
server_types = ["WSGI"]
try:
import ai2thor.fifo_server
server_types.append("FIFO")
except Exception as e:
pass
with open(metadata_path, "w") as f:
f.write(json.dumps(dict(server_types=server_types)))
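# Illustrative sketch (not part of the build pipeline): a consumer of a finished build
# can read the metadata.json written above to decide whether the FIFO server is
# available, e.g.
#   with open(metadata_path) as f:
#       server_types = json.load(f).get("server_types", [])
#   use_fifo = "FIFO" in server_types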
def class_dataset_images_for_scene(scene_name):
import ai2thor.controller
from itertools import product
import numpy as np
import cv2
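    # Sweep every reachable position in the scene over several rotations and camera
    # horizons, record which pickupable objects are sufficiently visible, then re-render
    # those viewpoints at a higher resolution and write a cropped, resized image of each
    # object to images/<scene_name>/<object_type>/.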
env = ai2thor.controller.Controller(quality="Low")
player_size = 300
zoom_size = 1000
target_size = 256
rotations = [0, 90, 180, 270]
horizons = [330, 0, 30]
buffer = 15
# object must be at least 40% in view
min_size = ((target_size * 0.4) / zoom_size) * player_size
env.start(width=player_size, height=player_size)
env.reset(scene_name)
event = env.step(
dict(
action="Initialize",
gridSize=0.25,
renderInstanceSegmentation=True,
renderSemanticSegmentation=False,
renderImage=False,
)
)
for o in event.metadata["objects"]:
if o["receptacle"] and o["receptacleObjectIds"] and o["openable"]:
print("opening %s" % o["objectId"])
env.step(
dict(action="OpenObject", objectId=o["objectId"], forceAction=True)
)
event = env.step(dict(action="GetReachablePositions", gridSize=0.25))
visible_object_locations = []
for point in event.metadata["actionReturn"]:
for rot, hor in product(rotations, horizons):
exclude_colors = set(
map(tuple, np.unique(event.instance_segmentation_frame[0], axis=0))
)
exclude_colors.update(
set(
map(
tuple,
np.unique(event.instance_segmentation_frame[:, -1, :], axis=0),
)
)
)
exclude_colors.update(
set(
map(tuple, np.unique(event.instance_segmentation_frame[-1], axis=0))
)
)
exclude_colors.update(
set(
map(
tuple,
np.unique(event.instance_segmentation_frame[:, 0, :], axis=0),
)
)
)
event = env.step(
dict(
action="TeleportFull",
x=point["x"],
y=point["y"],
z=point["z"],
rotation=rot,
horizon=hor,
forceAction=True,
),
raise_for_failure=True,
)
visible_objects = []
for o in event.metadata["objects"]:
if o["visible"] and o["objectId"] and o["pickupable"]:
color = event.object_id_to_color[o["objectId"]]
mask = (
(event.instance_segmentation_frame[:, :, 0] == color[0])
& (event.instance_segmentation_frame[:, :, 1] == color[1])
& (event.instance_segmentation_frame[:, :, 2] == color[2])
)
points = np.argwhere(mask)
if len(points) > 0:
min_y = int(np.min(points[:, 0]))
max_y = int(np.max(points[:, 0]))
min_x = int(np.min(points[:, 1]))
max_x = int(np.max(points[:, 1]))
max_dim = max((max_y - min_y), (max_x - min_x))
if (
max_dim > min_size
and min_y > buffer
and min_x > buffer
and max_x < (player_size - buffer)
and max_y < (player_size - buffer)
):
visible_objects.append(
dict(
objectId=o["objectId"],
min_x=min_x,
min_y=min_y,
max_x=max_x,
max_y=max_y,
)
)
print(
"[%s] including object id %s %s"
% (scene_name, o["objectId"], max_dim)
)
if visible_objects:
visible_object_locations.append(
dict(point=point, rot=rot, hor=hor, visible_objects=visible_objects)
)
env.stop()
env = ai2thor.controller.Controller()
env.start(width=zoom_size, height=zoom_size)
env.reset(scene_name)
event = env.step(dict(action="Initialize", gridSize=0.25))
for o in event.metadata["objects"]:
if o["receptacle"] and o["receptacleObjectIds"] and o["openable"]:
print("opening %s" % o["objectId"])
env.step(
dict(action="OpenObject", objectId=o["objectId"], forceAction=True)
)
for vol in visible_object_locations:
point = vol["point"]
event = env.step(
dict(
action="TeleportFull",
x=point["x"],
y=point["y"],
z=point["z"],
rotation=vol["rot"],
horizon=vol["hor"],
forceAction=True,
),
raise_for_failure=True,
)
for v in vol["visible_objects"]:
object_id = v["objectId"]
min_y = int(round(v["min_y"] * (zoom_size / player_size)))
max_y = int(round(v["max_y"] * (zoom_size / player_size)))
max_x = int(round(v["max_x"] * (zoom_size / player_size)))
min_x = int(round(v["min_x"] * (zoom_size / player_size)))
delta_y = max_y - min_y
delta_x = max_x - min_x
scaled_target_size = max(delta_x, delta_y, target_size) + buffer * 2
if min_x > (zoom_size - max_x):
start_x = min_x - (scaled_target_size - delta_x)
end_x = max_x + buffer
else:
end_x = max_x + (scaled_target_size - delta_x)
start_x = min_x - buffer
if min_y > (zoom_size - max_y):
start_y = min_y - (scaled_target_size - delta_y)
end_y = max_y + buffer
else:
end_y = max_y + (scaled_target_size - delta_y)
start_y = min_y - buffer
# print("max x %s max y %s min x %s min y %s" % (max_x, max_y, min_x, min_y))
# print("start x %s start_y %s end_x %s end y %s" % (start_x, start_y, end_x, end_y))
print("storing %s " % object_id)
img = event.cv2img[start_y:end_y, start_x:end_x, :]
dst = cv2.resize(
img, (target_size, target_size), interpolation=cv2.INTER_LANCZOS4
)
object_type = object_id.split("|")[0].lower()
target_dir = os.path.join("images", scene_name, object_type)
h = hashlib.md5()
h.update(json.dumps(point, sort_keys=True).encode("utf8"))
h.update(json.dumps(v, sort_keys=True).encode("utf8"))
os.makedirs(target_dir, exist_ok=True)
cv2.imwrite(os.path.join(target_dir, h.hexdigest() + ".png"), dst)
env.stop()
return scene_name
@task
def build_class_dataset(context):
import concurrent.futures
import ai2thor.controller
multiprocessing.set_start_method("spawn")
controller = ai2thor.controller.Controller()
executor = concurrent.futures.ProcessPoolExecutor(max_workers=4)
futures = []
for scene in controller.scene_names():
print("processing scene %s" % scene)
futures.append(executor.submit(class_dataset_images_for_scene, scene))
for f in concurrent.futures.as_completed(futures):
scene = f.result()
print("scene name complete: %s" % scene)
def local_build_name(prefix, arch):
return "thor-%s-%s" % (prefix, arch)
@task
def local_build_test(context, prefix="local", arch="OSXIntel64"):
from ai2thor.tests.constants import TEST_SCENE
local_build(context, prefix, arch, [TEST_SCENE])
@task(iterable=["scenes"])
def local_build(
context, prefix="local", arch="OSXIntel64", scenes=None, scripts_only=False
):
import ai2thor.controller
build = ai2thor.build.Build(arch, prefix, False)
env = dict()
if os.path.isdir("unity/Assets/Private/Scenes"):
env["INCLUDE_PRIVATE_SCENES"] = "true"
build_dir = os.path.join("builds", build.name)
if scripts_only:
env["BUILD_SCRIPTS_ONLY"] = "true"
if scenes:
env["BUILD_SCENES"] = ",".join(
map(ai2thor.controller.Controller.normalize_scene, scenes)
)
if _build("unity", arch, build_dir, build.name, env=env):
print("Build Successful")
else:
print("Build Failure")
generate_quality_settings(context)
@task
def webgl_build(
context,
scenes="",
room_ranges=None,
directory="builds",
prefix="local",
verbose=False,
content_addressable=False,
crowdsource_build=False,
):
"""
Creates a WebGL build
:param context:
    :param scenes: String of scenes to include in the build as a comma-separated list
    :param prefix: Prefix name for the build
    :param content_addressable: Whether to rename the unityweb build files to be content-addressable,
    i.e. to have their content hashes as part of their names.
:return:
"""
from functools import reduce
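    # Illustrative example (hypothetical hash): with content_addressable=True, a file such
    # as "thor-local-WebGL.data" whose md5 digest is "0a1b2c..." is renamed by the helper
    # below to "0a1b2c..._thor-local-WebGL.data", so the name changes whenever the content does.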
def file_to_content_addressable(file_path):
# name_split = os.path.splitext(file_path)
path_split = os.path.split(file_path)
directory = path_split[0]
file_name = path_split[1]
print("File name {} ".format(file_name))
with open(file_path, "rb") as f:
h = hashlib.md5()
h.update(f.read())
md5_id = h.hexdigest()
new_file_name = "{}_{}".format(md5_id, file_name)
os.rename(file_path, os.path.join(directory, new_file_name))
arch = "WebGL"
build_name = local_build_name(prefix, arch)
if room_ranges is not None:
floor_plans = [
"FloorPlan{}_physics".format(i)
for i in reduce(
lambda x, y: x + y,
map(
lambda x: x + [x[-1] + 1],
[
list(range(*tuple(int(y) for y in x.split("-"))))
for x in room_ranges.split(",")
],
),
)
]
scenes = ",".join(floor_plans)
if verbose:
print(scenes)
env = dict(BUILD_SCENES=scenes)
# https://forum.unity.com/threads/cannot-build-for-webgl-in-unity-system-dllnotfoundexception.1254429/
# without setting this environment variable the error mentioned in the thread will get thrown
os.environ["EMSDK_PYTHON"] = "/usr/bin/python3"
if crowdsource_build:
env["DEFINES"] = "CROWDSOURCE_TASK"
if _build("unity", arch, directory, build_name, env=env):
print("Build Successful")
else:
print("Build Failure")
build_path = _webgl_local_build_path(prefix, directory)
generate_quality_settings(context)
    # the remainder of this is only used to generate scene metadata; it is not
    # part of building the WebGL player itself
rooms = {
"kitchens": {"name": "Kitchens", "roomRanges": range(1, 31)},
"livingRooms": {"name": "Living Rooms", "roomRanges": range(201, 231)},
"bedrooms": {"name": "Bedrooms", "roomRanges": range(301, 331)},
"bathrooms": {"name": "Bathrooms", "roomRanges": range(401, 431)},
"foyers": {"name": "Foyers", "roomRanges": range(501, 531)},
}
room_type_by_id = {}
for room_type, room_data in rooms.items():
for room_num in room_data["roomRanges"]:
room_id = "FloorPlan{}_physics".format(room_num)
room_type_by_id[room_id] = {"type": room_type, "name": room_data["name"]}
scene_metadata = {}
for scene_name in scenes.split(","):
if scene_name not in room_type_by_id:
# allows for arbitrary scenes to be included dynamically
room_type = {"type": "Other", "name": None}
else:
room_type = room_type_by_id[scene_name]
if room_type["type"] not in scene_metadata:
scene_metadata[room_type["type"]] = {
"scenes": [],
"name": room_type["name"],
}
scene_metadata[room_type["type"]]["scenes"].append(scene_name)
if verbose:
print(scene_metadata)
to_content_addressable = [
("{}.data".format(build_name), "dataUrl"),
("{}.loader.js".format(build_name), "loaderUrl"),
("{}.wasm".format(build_name), "wasmCodeUrl"),
("{}.framework.js".format(build_name), "wasmFrameworkUrl"),
]
if content_addressable:
for file_name, key in to_content_addressable:
file_to_content_addressable(
os.path.join(build_path, "Build/{}".format(file_name)),
)
with open(os.path.join(build_path, "scenes.json"), "w") as f:
f.write(json.dumps(scene_metadata, sort_keys=False, indent=4))
@task
def generate_quality_settings(ctx):
import yaml
class YamlUnity3dTag(yaml.SafeLoader):
def let_through(self, node):
return self.construct_mapping(node)
YamlUnity3dTag.add_constructor(
"tag:unity3d.com,2011:47", YamlUnity3dTag.let_through
)
qs = yaml.load(
open("unity/ProjectSettings/QualitySettings.asset").read(),
Loader=YamlUnity3dTag,
)
quality_settings = {}
default = "Ultra"
for i, q in enumerate(qs["QualitySettings"]["m_QualitySettings"]):
quality_settings[q["name"]] = i
assert default in quality_settings
with open("ai2thor/_quality_settings.py", "w") as f:
f.write("# GENERATED FILE - DO NOT EDIT\n")
f.write("DEFAULT_QUALITY = '%s'\n" % default)
f.write("QUALITY_SETTINGS = " + pprint.pformat(quality_settings))
def git_commit_comment():
comment = (
subprocess.check_output("git log -n 1 --format=%B", shell=True)
.decode("utf8")
.strip()
)
return comment
def git_commit_id():
commit_id = (
subprocess.check_output("git log -n 1 --format=%H", shell=True)
.decode("ascii")
.strip()
)
return commit_id
@task
def deploy_pip(context):
if "TWINE_PASSWORD" not in os.environ:
raise Exception("Twine token not specified in environment")
subprocess.check_call("twine upload -u __token__ dist/*", shell=True)
@task
def push_pip_commit(context):
import glob
commit_id = git_commit_id()
s3 = boto3.resource("s3")
for g in glob.glob("dist/ai2thor-0+%s*" % commit_id):
acl = "public-read"
pip_name = os.path.basename(g)
logger.info("pushing pip file %s" % g)
with open(g, "rb") as f:
s3.Object(
ai2thor.build.PYPI_S3_BUCKET, os.path.join("ai2thor", pip_name)
).put(Body=f, ACL=acl)
@task
def build_pip_commit(context):
commit_id = git_commit_id()
if os.path.isdir("dist"):
shutil.rmtree("dist")
generate_quality_settings(context)
# must use this form to create valid PEP440 version specifier
version = "0+" + commit_id
with open("ai2thor/_builds.py", "w") as fi:
fi.write("# GENERATED FILE - DO NOT EDIT\n")
fi.write("COMMIT_ID = '%s'\n" % commit_id)
with open("ai2thor/_version.py", "w") as fi:
fi.write("# Copyright Allen Institute for Artificial Intelligence 2021\n")
fi.write("# GENERATED FILE - DO NOT EDIT\n")
fi.write("__version__ = '%s'\n" % (version))
subprocess.check_call("python setup.py clean --all", shell=True)
subprocess.check_call("python setup.py sdist bdist_wheel --universal", shell=True)
@task
def build_pip(context, version):
import xml.etree.ElementTree as ET
import requests
res = requests.get("https://pypi.org/rss/project/ai2thor/releases.xml")
res.raise_for_status()
root = ET.fromstring(res.content)
latest_version = None
for title in root.findall("./channel/item/title"):
latest_version = title.text
break
# make sure that the tag is on this commit
commit_tags = (
subprocess.check_output("git tag --points-at", shell=True)
.decode("ascii")
.strip()
.split("\n")
)
if version not in commit_tags:
raise Exception("tag %s is not on current commit" % version)
commit_id = git_commit_id()
res = requests.get("https://api.github.com/repos/allenai/ai2thor/commits?sha=main")
res.raise_for_status()
if commit_id not in map(lambda c: c["sha"], res.json()):
raise Exception("tag %s is not off the main branch" % version)
if not re.match(r"^[0-9]{1,3}\.+[0-9]{1,3}\.[0-9]{1,3}$", version):
raise Exception("invalid version: %s" % version)
for plat in ai2thor.build.AUTO_BUILD_PLATFORMS:
commit_build = ai2thor.build.Build(plat, commit_id, False)
if not commit_build.exists():
raise Exception("Build does not exist for %s/%s" % (commit_id, plat.name()))
current_maj, current_min, current_sub = list(map(int, latest_version.split(".")))
next_maj, next_min, next_sub = list(map(int, version.split(".")))
if (
(next_maj == current_maj + 1)
or (next_maj == current_maj and next_min == current_min + 1)
or (
next_maj == current_maj
and next_min == current_min
and next_sub >= current_sub + 1
)
):
if os.path.isdir("dist"):
shutil.rmtree("dist")
generate_quality_settings(context)
with open("ai2thor/_builds.py", "w") as fi:
fi.write("# GENERATED FILE - DO NOT EDIT\n")
fi.write("COMMIT_ID = '%s'\n" % commit_id)
with open("ai2thor/_version.py", "w") as fi:
fi.write("# Copyright Allen Institute for Artificial Intelligence 2021\n")
fi.write("# GENERATED FILE - DO NOT EDIT\n")
fi.write("__version__ = '%s'\n" % (version))
subprocess.check_call("python setup.py clean --all", shell=True)
subprocess.check_call(
"python setup.py sdist bdist_wheel --universal", shell=True
)
else:
raise Exception(
"Invalid version increment: new version=%s,current version=%s; must increment the major, minor or patch by only 1"
% (version, latest_version)
)
@task
def fetch_source_textures(context):
import ai2thor.downloader
zip_data = ai2thor.downloader.download(
"http://s3-us-west-2.amazonaws.com/ai2-thor/assets/source-textures.zip",
"source-textures",
"75476d60a05747873f1173ba2e1dbe3686500f63bcde3fc3b010eea45fa58de7",
)
z = zipfile.ZipFile(io.BytesIO(zip_data))
z.extractall(os.getcwd())
def build_log_push(build_info, include_private_scenes):
if os.path.exists(build_info["log"]):
with open(build_info["log"]) as f:
build_log = f.read() + "\n" + build_info.get("build_exception", "")
else:
build_log = build_info.get("build_exception", "")
build_log_key = "builds/" + build_info["log"]
s3 = boto3.resource("s3")
bucket = ai2thor.build.PUBLIC_S3_BUCKET
acl = "public-read"
if include_private_scenes:
bucket = ai2thor.build.PRIVATE_S3_BUCKET
acl = "private"
s3.Object(bucket, build_log_key).put(
Body=build_log, ACL=acl, ContentType="text/plain"
)
def archive_push(unity_path, build_path, build_dir, build_info, include_private_scenes):
threading.current_thread().success = False
archive_name = os.path.join(unity_path, build_path)
zip_buf = io.BytesIO()
# Unity build is done with CompressWithLz4. Zip with compresslevel=1
    # results in smaller builds than uncompressed Unity + zip compresslevel=6 (the default)
logger.info(f"building zip archive {archive_name} {build_dir}")
zipf = zipfile.ZipFile(zip_buf, "w", zipfile.ZIP_DEFLATED, compresslevel=1)
add_files(zipf, os.path.join(unity_path, build_dir), exclude_ext=(".debug",))
zipf.close()
zip_buf.seek(0)
zip_data = zip_buf.read()
logger.info("generated zip archive %s %s" % (archive_name, len(zip_data)))
push_build(archive_name, zip_data, include_private_scenes)
build_log_push(build_info, include_private_scenes)
print("Build successful")
threading.current_thread().success = True
@task
def pre_test(context):
import ai2thor.controller
c = ai2thor.controller.Controller()
os.makedirs("unity/builds/%s" % c.build_name())
shutil.move(
os.path.join("unity", "builds", c.build_name() + ".app"),
"unity/builds/%s" % c.build_name(),
)
def clean():
import scripts.update_private
# a deploy key is used on the build server and an .ssh/config entry has been added
# to point to the deploy key caclled ai2thor-private-github
scripts.update_private.private_repo_url = (
"git@ai2thor-private-github:allenai/ai2thor-private.git"
)
subprocess.check_call("git reset --hard", shell=True)
subprocess.check_call("git clean -f -d -x", shell=True)
shutil.rmtree("unity/builds", ignore_errors=True)
shutil.rmtree(scripts.update_private.private_dir, ignore_errors=True)
scripts.update_private.checkout_branch()
def ci_prune_cache(cache_dir):
entries = {}
for e in os.scandir(cache_dir):
if os.path.isdir(e.path):
mtime = os.stat(e.path).st_mtime
entries[e.path] = mtime
# keeping the most recent 60 entries (this keeps the cache around 300GB-500GB)
sorted_paths = sorted(entries.keys(), key=lambda x: entries[x])[:-60]
for path in sorted_paths:
if os.path.basename(path) != "main":
logger.info("pruning cache directory: %s" % path)
shutil.rmtree(path)
def link_build_cache(root_dir, arch, branch):
library_path = os.path.join(root_dir, "unity", "Library")
logger.info("linking build cache for %s" % branch)
if os.path.lexists(library_path):
os.unlink(library_path)
    # this takes care of branches with '/' in them
# to avoid implicitly creating directories under the cache dir
encoded_branch = re.sub(r"[^a-zA-Z0-9_\-.]", "_", re.sub("_", "__", branch))
cache_base_dir = os.path.join(os.environ["HOME"], "cache")
os.makedirs(cache_base_dir, exist_ok=True)
ci_prune_cache(cache_base_dir)
main_cache_dir = os.path.join(cache_base_dir, "main", arch)
branch_cache_dir = os.path.join(cache_base_dir, encoded_branch, arch)
# use the main cache as a starting point to avoid
# having to re-import all assets, which can take up to 1 hour
if not os.path.exists(branch_cache_dir) and os.path.exists(main_cache_dir):
logger.info("copying main cache for %s" % encoded_branch)
os.makedirs(os.path.dirname(branch_cache_dir), exist_ok=True)
# -c uses MacOS clonefile
subprocess.check_call(
"cp -a -c %s %s" % (main_cache_dir, branch_cache_dir), shell=True
)
logger.info("copying main cache complete for %s" % encoded_branch)
branch_library_cache_dir = os.path.join(branch_cache_dir, "Library")
os.makedirs(branch_library_cache_dir, exist_ok=True)
os.symlink(branch_library_cache_dir, library_path)
# update atime/mtime to simplify cache pruning
os.utime(os.path.join(cache_base_dir, encoded_branch))
def travis_build(build_id):
import requests
res = requests.get(
"https://api.travis-ci.com/build/%s" % build_id,
headers={
"Accept": "application/json",
"Content-Type": "application/json",
"Travis-API-Version": "3",
},
)
res.raise_for_status()
return res.json()
def pending_travis_build():
import requests
res = requests.get(
"https://api.travis-ci.com/repo/3459357/builds?include=build.id%2Cbuild.commit%2Cbuild.branch%2Cbuild.request%2Cbuild.created_by%2Cbuild.repository&build.state=started&sort_by=started_at:desc",
headers={
"Accept": "application/json",
"Content-Type": "application/json",
"Travis-API-Version": "3",
},
timeout=10,
)
for b in res.json()["builds"]:
tag = None
if b["tag"]:
tag = b["tag"]["name"]
return {
"branch": b["branch"]["name"],
"commit_id": b["commit"]["sha"],
"tag": tag,
"id": b["id"],
}
def pytest_s3_object(commit_id):
s3 = boto3.resource("s3")
pytest_key = "builds/pytest-%s.json" % commit_id
return s3.Object(ai2thor.build.PUBLIC_S3_BUCKET, pytest_key)
def pytest_s3_general_object(commit_id, filename):
s3 = boto3.resource("s3")
# TODO: Create a new bucket directory for test artifacts
pytest_key = "builds/%s-%s" % (commit_id, filename)
return s3.Object(ai2thor.build.PUBLIC_S3_BUCKET, pytest_key)
# def pytest_s3_data_urls(commit_id):
# test_outputfiles = sorted(
# glob.glob("{}/*".format(TEST_OUTPUT_DIRECTORY))
# )
# logger.info("Getting test data in directory {}".format(os.path.join(os.getcwd(), TEST_OUTPUT_DIRECTORY)))
# logger.info("Test output files: {}".format(", ".join(test_outputfiles)))
# test_data_urls = []
# for filename in test_outputfiles:
# s3_test_out_obj = pytest_s3_general_object(commit_id, filename)
#
# s3_pytest_url = "http://s3-us-west-2.amazonaws.com/%s/%s" % (
# s3_test_out_obj.bucket_name,
# s3_test_out_obj.key,
# )
#
# _, ext = os.path.splitext(filename)
#
# if ext in content_types:
# s3_test_out_obj.put(
# Body=s3_test_out_obj, ACL="public-read", ContentType=content_types[ext]
# )
# logger.info(s3_pytest_url)
# # merged_result["stdout"] += "--- test output url: {}".format(s3_pytest_url)
# test_data_urls.append(s3_pytest_url)
# return test_data_urls
@task
def ci_merge_push_pytest_results(context, commit_id):
s3_obj = pytest_s3_object(commit_id)
s3_pytest_url = "http://s3-us-west-2.amazonaws.com/%s/%s" % (
s3_obj.bucket_name,
s3_obj.key,
)
logger.info(
"ci_merge_push_pytest_results pytest before url check code change logging works"
)
logger.info("pytest url %s" % s3_pytest_url)
logger.info("s3 obj is valid: {}".format(s3_obj))
merged_result = dict(success=True, stdout="", stderr="", test_data=[])
result_files = ["tmp/pytest_results.json", "tmp/test_utf_results.json"]
for rf in result_files:
with open(rf) as f:
result = json.loads(f.read())
merged_result["success"] &= result["success"]
merged_result["stdout"] += result["stdout"] + "\n"
merged_result["stderr"] += result["stderr"] + "\n"
# merged_result["test_data"] = pytest_s3_data_urls(commit_id)
s3_obj.put(
Body=json.dumps(merged_result),
ACL="public-read",
ContentType="application/json",
)
def ci_pytest(branch, commit_id):
logger.info(f"running pytest for {branch} {commit_id}")
start_time = time.time()
proc = subprocess.run(
"pytest", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
result = dict(
success=proc.returncode == 0,
stdout=proc.stdout.decode("ascii"),
stderr=proc.stderr.decode("ascii"),
)
with open("tmp/pytest_results.json", "w") as f:
f.write(json.dumps(result))
logger.info(
f"finished pytest for {branch} {commit_id} in {time.time() - start_time:.2f} seconds"
)
@task
def ci_build(context):
with open(os.path.join(os.environ["HOME"], ".ci-build.lock"), "w") as lock_f:
arch_temp_dirs = dict()
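        # Hold an exclusive, non-blocking lock on ~/.ci-build.lock so only one ci_build
        # invocation runs at a time; if another run already holds the lock, the
        # BlockingIOError raised by flock is caught below and this invocation exits quietly.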
try:
fcntl.flock(lock_f, fcntl.LOCK_EX | fcntl.LOCK_NB)
build = pending_travis_build()
skip_branches = ["vids", "video", "erick/cloudrendering", "it_vr"]
if build and build["branch"] not in skip_branches:
# disabling delete temporarily since it interferes with pip releases
# pytest_s3_object(build["commit_id"]).delete()
logger.info(f"pending build for {build['branch']} {build['commit_id']}")
clean()
subprocess.check_call("git fetch", shell=True)
subprocess.check_call(
"git checkout %s --" % build["branch"], shell=True
)
subprocess.check_call(
"git checkout -qf %s" % build["commit_id"], shell=True
)
private_scene_options = [False]
build_archs = ["OSXIntel64", "Linux64"]
# CloudRendering only supported with 2020.3.25
# should change this in the future to automatically install
# cloudrendering engine if available
if _unity_version() == "2020.3.25f1":
build_archs.append("CloudRendering")
build_archs.reverse() # Let's do CloudRendering first as it's more likely to fail
has_any_build_failed = False
for include_private_scenes in private_scene_options:
for arch in build_archs:
logger.info(
f"processing {arch} {build['branch']} {build['commit_id']}"
)
temp_dir = arch_temp_dirs[arch] = os.path.join(
os.environ["HOME"],
"tmp/unity-%s-%s-%s-%s"
% (
arch,
build["commit_id"],
os.getpid(),
random.randint(0, 2**32 - 1),
),
)
os.makedirs(temp_dir)
logger.info(f"copying unity data to {temp_dir}")
# -c uses MacOS clonefile
subprocess.check_call(f"cp -a -c unity {temp_dir}", shell=True)
logger.info(f"completed unity data copy to {temp_dir}")
rdir = os.path.join(temp_dir, "unity/builds")
commit_build = ai2thor.build.Build(
platform=arch,
commit_id=build["commit_id"],
include_private_scenes=include_private_scenes,
releases_dir=rdir,
)
if commit_build.exists():
logger.info(
f"found build for commit {build['commit_id']} {arch}"
)
# download the build so that we can run the tests
if arch == "OSXIntel64":
commit_build.download()
else:
# this is done here so that when a tag build request arrives and the commit_id has already
# been built, we avoid bootstrapping the cache since we short circuited on the line above
link_build_cache(
root_dir=temp_dir, arch=arch, branch=build["branch"]
)
build_success = ci_build_arch(
root_dir=temp_dir,
arch=arch,
commit_id=build["commit_id"],
include_private_scenes=include_private_scenes,
                            # Don't bother trying another build if one has already failed
                            immediately_fail_and_push_log=has_any_build_failed,
                            timeout=60 * 60,
)
has_any_build_failed = (
has_any_build_failed or not build_success
)
if build_success:
logger.info(
f"Build success detected for {arch} {build['commit_id']}"
)
else:
logger.error(f"Build failed for {arch}")
# the UnityLockfile is used as a trigger to indicate that Unity has closed
# the project and we can run the unit tests
# waiting for all builds to complete before starting tests
for arch in build_archs:
lock_file_path = os.path.join(
arch_temp_dirs[arch], "unity/Temp/UnityLockfile"
)
if os.path.isfile(lock_file_path):
logger.info(f"attempting to lock {lock_file_path}")
lock_file = os.open(lock_file_path, os.O_RDWR)
fcntl.lockf(lock_file, fcntl.LOCK_EX)
fcntl.lockf(lock_file, fcntl.LOCK_UN)
os.close(lock_file)
logger.info(f"obtained lock on {lock_file_path}")
# don't run tests for a tag since results should exist
# for the branch commit
procs = []
if build["tag"] is None:
# its possible that the cache doesn't get linked if the builds
# succeeded during an earlier run
link_build_cache(
arch_temp_dirs["OSXIntel64"], "OSXIntel64", build["branch"]
)
# link builds directory so pytest can run
logger.info("current directory pre-symlink %s" % os.getcwd())
os.symlink(
os.path.join(arch_temp_dirs["OSXIntel64"], "unity/builds"),
"unity/builds",
)
os.makedirs("tmp", exist_ok=True)
# using threading here instead of multiprocessing since we must use the start_method of spawn, which
# causes the tasks.py to get reloaded, which may be different on a branch from main
utf_proc = threading.Thread(
target=ci_test_utf,
args=(
build["branch"],
build["commit_id"],
arch_temp_dirs["OSXIntel64"],
),
)
utf_proc.start()
procs.append(utf_proc)
pytest_proc = threading.Thread(
target=ci_pytest, args=(build["branch"], build["commit_id"])
)
pytest_proc.start()
procs.append(pytest_proc)
## allow webgl to be force deployed with #webgl-deploy in the commit comment
if (
build["branch"] in ["main", "demo-updates"]
and "#webgl-deploy" in git_commit_comment()
):
ci_build_webgl(context, build["commit_id"])
for p in procs:
if p:
logger.info(
"joining proc %s for %s %s"
% (p, build["branch"], build["commit_id"])
)
p.join()
if build["tag"] is None:
ci_merge_push_pytest_results(context, build["commit_id"])
# must have this after all the procs are joined
# to avoid generating a _builds.py file that would affect pytest execution
build_pip_commit(context)
push_pip_commit(context)
generate_pypi_index(context)
# give the travis poller time to see the result
for i in range(12):
b = travis_build(build["id"])
logger.info("build state for %s: %s" % (build["id"], b["state"]))
if b["state"] != "started":
break
time.sleep(10)
logger.info(
"build complete %s %s" % (build["branch"], build["commit_id"])
)
fcntl.flock(lock_f, fcntl.LOCK_UN)
except io.BlockingIOError as e:
pass
finally:
for arch, temp_dir in arch_temp_dirs.items():
logger.info("deleting temp dir %s" % temp_dir)
shutil.rmtree(temp_dir)
@task
def install_cloudrendering_engine(context, force=False):
if not sys.platform.startswith("darwin"):
raise Exception("CloudRendering Engine can only be installed on Mac")
s3 = boto3.resource("s3")
target_base_dir = "/Applications/Unity/Hub/Editor/{}/PlaybackEngines".format(
_unity_version()
)
full_dir = os.path.join(target_base_dir, "CloudRendering")
if os.path.isdir(full_dir):
if force:
shutil.rmtree(full_dir)
logger.info(
"CloudRendering engine already installed - removing due to force"
)
else:
logger.info(
"skipping installation - CloudRendering engine already installed"
)
return
print("packages/CloudRendering-%s.zip" % _unity_version())
res = s3.Object(
ai2thor.build.PRIVATE_S3_BUCKET,
"packages/CloudRendering-%s.zip" % _unity_version(),
).get()
data = res["Body"].read()
z = zipfile.ZipFile(io.BytesIO(data))
z.extractall(target_base_dir)
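# CI helper used when a commit comment contains #webgl-deploy: relinks the WebGL build cache,
# rebuilds and deploys the demo, and records the auto-deployed commit id in the bucket.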
@task
def ci_build_webgl(context, commit_id):
branch = "main"
logger.info("starting auto-build webgl build deploy %s %s" % (branch, commit_id))
# linking here in the event we didn't link above since the builds had
# already completed. Omitting this will cause the webgl build
# to import all assets from scratch into a new unity/Library
arch = "WebGL"
set_gi_cache_folder(arch)
link_build_cache(os.getcwd(), arch, branch)
webgl_build_deploy_demo(
context, verbose=True, content_addressable=False, force=True
)
logger.info("finished webgl build deploy %s %s" % (branch, commit_id))
update_webgl_autodeploy_commit_id(commit_id)
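# Points Unity's GI cache at a per-architecture folder (via plutil) so concurrent builds for
# different architectures don't race on a single shared GICache directory.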
def set_gi_cache_folder(arch):
gi_cache_folder = os.path.join(os.environ["HOME"], "GICache/%s" % arch)
plist_path = os.path.join(
os.environ["HOME"], "Library/Preferences/com.unity3d.UnityEditor5.x.plist"
)
# done to avoid race conditions when modifying GICache from more than one build
subprocess.check_call(
"plutil -replace GICacheEnableCustomPath -bool TRUE %s" % plist_path, shell=True
)
subprocess.check_call(
"plutil -replace GICacheFolder -string '%s' %s" % (gi_cache_folder, plist_path),
shell=True,
)
subprocess.check_call(
"plutil -replace GICacheMaximumSizeGB -integer 100 %s" % (plist_path,),
shell=True,
)
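# Builds one architecture for a commit inside root_dir: sets up the GI cache, runs _build with
# an optional timeout, and pushes either the build archive (on success) or just the build log
# (on failure) to S3. Returns True on success, False otherwise.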
def ci_build_arch(
root_dir: str,
arch: str,
commit_id: str,
include_private_scenes=False,
immediately_fail_and_push_log: bool = False,
timeout: int = 60 * 60,
):
start_wd = os.getcwd()
try:
os.chdir(root_dir)
unity_path = "unity"
build_name = ai2thor.build.build_name(arch, commit_id, include_private_scenes)
build_dir = os.path.join("builds", build_name)
build_path = build_dir + ".zip"
build_info = {}
start_time = time.time()
try:
build_info["log"] = f"{build_name}.log"
if immediately_fail_and_push_log:
raise ForcedFailure(
f"Build for {arch} {commit_id} was told to fail immediately, likely because a build for"
f" another architecture already failed."
)
env = {}
if include_private_scenes:
env["INCLUDE_PRIVATE_SCENES"] = "true"
set_gi_cache_folder(arch)
logger.info(f"Starting build for {arch} {commit_id}")
success = _build(
unity_path=unity_path,
arch=arch,
build_dir=build_dir,
build_name=build_name,
env=env,
timeout=timeout,
)
logger.info(
f"Finished build for {arch} {commit_id}, took {(time.time() - start_time) / 60:.2f} minutes. Success: {success}"
)
if not success:
raise Exception(f"Build for {arch} {commit_id} failed.")
archive_push(
unity_path=unity_path,
build_path=build_path,
build_dir=build_dir,
build_info=build_info,
include_private_scenes=include_private_scenes,
)
return True
except Exception as e:
logger.info(
f"Caught exception when building {arch} {commit_id} after {(time.time() - start_time) / 60:.2f} minutes: {e}"
)
build_info["build_exception"] = f"Exception building: {e}"
build_log_push(build_info, include_private_scenes)
return False
finally:
os.chdir(start_wd)
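# Travis-side task: polls S3 until a fresh build log or archive exists for every auto-build
# platform (or the timeout is reached), verifies the archives were uploaded, then fetches and
# prints the pytest results that the CI host pushed for this commit.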
@task
def poll_ci_build(context):
import requests
import datetime
commit_id = git_commit_id()
start_datetime = datetime.datetime.utcnow()
hours_before_timeout = 2
print(
f"WAITING FOR BUILDS TO COMPLETE ({hours_before_timeout} hours before timeout)"
)
start_time = time.time()
last_emit_time = 0
for i in range(360 * hours_before_timeout):
log_exist_count = 0
# must emit something at least once every 10 minutes
# otherwise travis will time out the build
if (time.time() - last_emit_time) > 120:
print(".", end="")
last_emit_time = time.time()
check_platforms = ai2thor.build.AUTO_BUILD_PLATFORMS
for plat in check_platforms:
commit_build = ai2thor.build.Build(plat, commit_id, False)
try:
res = requests.head(commit_build.log_url)
if res.status_code == 200:
last_modified = datetime.datetime.strptime(
res.headers["Last-Modified"], "%a, %d %b %Y %H:%M:%S GMT"
)
# if a build is restarted, a log from a previous build will exist
# but its last-modified date will precede the start datetime
if last_modified > start_datetime or commit_build.exists():
log_exist_count += 1
# we observe errors when polling AWS periodically - we don't want these to stop
# the build
except requests.exceptions.ConnectionError as e:
print(f"Caught exception when polling AWS, ignoring (error: {e})")
if log_exist_count == len(check_platforms):
break
sys.stdout.flush()
time.sleep(10)
print(f"\nCHECKING TOOK {(time.time() - start_time) / 60:.2f} minutes")
print("\nCHECKING IF ALL BUILDS SUCCESSFULLY UPLOADED")
any_failures = False
for plat in ai2thor.build.AUTO_BUILD_PLATFORMS:
        commit_build = ai2thor.build.Build(plat, commit_id, False)
success = commit_build.exists()
any_failures = any_failures or not success
if not success:
print(
f"\nBuild DOES NOT exist for arch {plat}, expected log url: {commit_build.log_url}"
)
else:
print(
f"\nBuild DOES exist for arch {plat}, log url: {commit_build.log_url}"
)
if any_failures:
        print("\nERROR: BUILD FAILURES DETECTED")
sys.exit(1)
print("\nRETRIEVING `pytest` RESULTS")
pytest_missing = True
for i in range(30):
if (time.time() - last_emit_time) > 120:
print(".", end="")
last_emit_time = time.time()
s3_obj = pytest_s3_object(commit_id)
s3_pytest_url = "http://s3-us-west-2.amazonaws.com/%s/%s" % (
s3_obj.bucket_name,
s3_obj.key,
)
res = requests.get(s3_pytest_url)
if res.status_code == 200:
pytest_missing = False
pytest_result = res.json()
print(pytest_result["stdout"]) # print so that it appears in travis log
print(pytest_result["stderr"])
if "test_data" in pytest_result:
print(f"Pytest url: {s3_pytest_url}")
print("Data urls: ")
print(", ".join(pytest_result["test_data"]))
else:
print(f"No test data url's in json '{s3_pytest_url}'.")
if not pytest_result["success"]:
print("ERROR: pytest FAILURE")
sys.exit(1)
break
time.sleep(10)
if pytest_missing:
print("\nERROR: MISSING pytest OUTPUT")
sys.exit(1)
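# Builds every auto-build platform, with and without private scenes, for a timestamped version
# and pushes the resulting archives to S3 from background threads.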
@task
def build(context, local=False):
version = datetime.datetime.now().strftime("%Y%m%d%H%M")
builds = {"Docker": {"tag": version}}
threads = []
for include_private_scenes in (True, False):
for plat in ai2thor.build.AUTO_BUILD_PLATFORMS:
env = {}
if include_private_scenes:
env["INCLUDE_PRIVATE_SCENES"] = "true"
unity_path = "unity"
build_name = ai2thor.build.build_name(
plat.name(), version, include_private_scenes
)
build_dir = os.path.join("builds", build_name)
build_path = build_dir + ".zip"
build_info = builds[plat.name()] = {}
build_info["log"] = "%s.log" % (build_name,)
_build(unity_path, plat.name(), build_dir, build_name, env=env)
t = threading.Thread(
target=archive_push,
args=(
unity_path,
build_path,
build_dir,
build_info,
include_private_scenes,
),
)
t.start()
threads.append(t)
# dp.join()
# if dp.exitcode != 0:
# raise Exception("Exception with docker build")
    for t in threads:
        t.join()
        # threading.Thread has no `success` attribute by default, so read it defensively;
        # a thread that never set the flag is treated as successful
        if not getattr(t, "success", True):
            raise Exception("Error with thread")
generate_quality_settings(context)
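# Starts an interactive controller session (Unity or robot backend) in the given scene and drops
# into the InteractiveControllerPrompt, optionally saving per-frame images to image_directory.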
@task
def interact(
ctx,
scene,
editor_mode=False,
local_build=False,
image=False,
depth_image=False,
class_image=False,
object_image=False,
metadata=False,
robot=False,
port=8200,
host="127.0.0.1",
image_directory=".",
width=300,
height=300,
include_private_scenes=False,
noise=False,
):
import ai2thor.controller
import ai2thor.robot_controller
if image_directory != ".":
if os.path.exists(image_directory):
shutil.rmtree(image_directory)
os.makedirs(image_directory)
if not robot:
env = ai2thor.controller.Controller(
host=host,
port=port,
width=width,
height=height,
local_build=local_build,
image_dir=image_directory,
start_unity=False if editor_mode else True,
save_image_per_frame=True,
include_private_scenes=include_private_scenes,
add_depth_noise=noise,
scene=scene,
)
else:
env = ai2thor.robot_controller.Controller(
host=host,
port=port,
width=width,
height=height,
image_dir=image_directory,
save_image_per_frame=True,
)
env.reset(scene)
initialize_event = env.step(
dict(
action="Initialize",
gridSize=0.25,
renderInstanceSegmentation=object_image,
renderSemanticSegmentation=class_image,
renderDepthImage=depth_image,
)
)
from ai2thor.interact import InteractiveControllerPrompt
InteractiveControllerPrompt.write_image(
initialize_event,
image_directory,
"_init",
image_per_frame=True,
semantic_segmentation_frame=class_image,
instance_segmentation_frame=object_image,
color_frame=image,
depth_frame=depth_image,
metadata=metadata,
)
env.interact(
semantic_segmentation_frame=class_image,
instance_segmentation_frame=object_image,
depth_frame=depth_image,
color_frame=image,
metadata=metadata,
)
env.stop()
@task
def get_depth(
ctx,
scene=None,
image=False,
depth_image=False,
class_image=False,
object_image=False,
metadata=False,
port=8200,
host="127.0.0.1",
image_directory=".",
number=1,
local_build=False,
teleport=None,
rotation=0,
):
import ai2thor.controller
import ai2thor.robot_controller
if image_directory != ".":
if os.path.exists(image_directory):
shutil.rmtree(image_directory)
os.makedirs(image_directory)
if scene is None:
env = ai2thor.robot_controller.Controller(
host=host,
port=port,
width=600,
height=600,
image_dir=image_directory,
save_image_per_frame=True,
)
else:
env = ai2thor.controller.Controller(
width=600, height=600, local_build=local_build
)
if scene is not None:
env.reset(scene)
initialize_event = env.step(
dict(
action="Initialize",
gridSize=0.25,
renderInstanceSegmentation=object_image,
renderSemanticSegmentation=class_image,
renderDepthImage=depth_image,
agentMode="locobot",
fieldOfView=59,
continuous=True,
snapToGrid=False,
)
)
from ai2thor.interact import InteractiveControllerPrompt
if scene is not None:
teleport_arg = dict(
action="TeleportFull", y=0.9010001, rotation=dict(x=0, y=rotation, z=0)
)
if teleport is not None:
teleport = [float(pos) for pos in teleport.split(",")]
t_size = len(teleport)
if 1 <= t_size:
teleport_arg["x"] = teleport[0]
if 2 <= t_size:
teleport_arg["z"] = teleport[1]
if 3 <= t_size:
teleport_arg["y"] = teleport[2]
evt = env.step(teleport_arg)
InteractiveControllerPrompt.write_image(
evt,
image_directory,
"_{}".format("teleport"),
image_per_frame=True,
semantic_segmentation_frame=class_image,
instance_segmentation_frame=object_image,
color_frame=image,
depth_frame=depth_image,
metadata=metadata,
)
InteractiveControllerPrompt.write_image(
initialize_event,
image_directory,
"_init",
image_per_frame=True,
semantic_segmentation_frame=class_image,
instance_segmentation_frame=object_image,
color_frame=image,
depth_frame=depth_image,
metadata=metadata,
)
for i in range(number):
event = env.step(action="MoveAhead", moveMagnitude=0.0)
InteractiveControllerPrompt.write_image(
event,
image_directory,
"_{}".format(i),
image_per_frame=True,
semantic_segmentation_frame=class_image,
instance_segmentation_frame=object_image,
color_frame=image,
depth_frame=depth_image,
metadata=metadata,
)
env.stop()
@task
def inspect_depth(
ctx, directory, all=False, indices=None, jet=False, under_score=False
):
import numpy as np
import cv2
import glob
under_prefix = "_" if under_score else ""
    regex_str = r"depth{}(.*)\.png".format(under_prefix)
def sort_key_function(name):
split_name = name.split("/")
x = re.search(regex_str, split_name[len(split_name) - 1]).group(1)
try:
val = int(x)
return val
except ValueError:
return -1
if indices is None or all:
images = sorted(
glob.glob("{}/depth{}*.png".format(directory, under_prefix)),
key=sort_key_function,
)
print(images)
else:
images = ["depth{}{}.png".format(under_prefix, i) for i in indices.split(",")]
for depth_filename in images:
# depth_filename = os.path.join(directory, "depth_{}.png".format(index))
split_fn = depth_filename.split("/")
index = re.search(regex_str, split_fn[len(split_fn) - 1]).group(1)
print("index {}".format(index))
print("Inspecting: '{}'".format(depth_filename))
depth_raw_filename = os.path.join(
directory, "depth_raw{}{}.npy".format("_" if under_score else "", index)
)
raw_depth = np.load(depth_raw_filename)
if jet:
mn = np.min(raw_depth)
mx = np.max(raw_depth)
print("min depth value: {}, max depth: {}".format(mn, mx))
norm = (((raw_depth - mn).astype(np.float32) / (mx - mn)) * 255.0).astype(
np.uint8
)
img = cv2.applyColorMap(norm, cv2.COLORMAP_JET)
else:
grayscale = (
255.0 / raw_depth.max() * (raw_depth - raw_depth.min())
).astype(np.uint8)
print("max {} min {}".format(raw_depth.max(), raw_depth.min()))
img = grayscale
print(raw_depth.shape)
def inspect_pixel(event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
print("Pixel at x: {}, y: {} ".format(y, x))
print(raw_depth[y][x])
cv2.namedWindow("image")
cv2.setMouseCallback("image", inspect_pixel)
cv2.imshow("image", img)
cv2.waitKey(0)
@task
def real_2_sim(
ctx, source_dir, index, scene, output_dir, rotation=0, local_build=False, jet=False
):
import cv2
from ai2thor.util.transforms import transform_real_2_sim
depth_metadata_fn = os.path.join(source_dir, "metadata_{}.json".format(index))
color_real_fn = os.path.join(source_dir, "color_{}.png".format(index))
    color_sim_fn = os.path.join(output_dir, "color_teleport.png")
with open(depth_metadata_fn, "r") as f:
metadata = json.load(f)
pos = metadata["agent"]["position"]
sim_pos = transform_real_2_sim(pos)
teleport_arg = "{},{},{}".format(sim_pos["x"], sim_pos["z"], sim_pos["y"])
print(sim_pos)
print(teleport_arg)
inspect_depth(ctx, source_dir, indices=index, under_score=True, jet=jet)
get_depth(
ctx,
scene=scene,
image=True,
depth_image=True,
class_image=False,
object_image=False,
metadata=True,
image_directory=output_dir,
number=1,
local_build=local_build,
teleport=teleport_arg,
rotation=rotation,
)
im = cv2.imread(color_real_fn)
cv2.imshow("color_real.png", im)
im2 = cv2.imread(color_sim_fn)
cv2.imshow("color_sim.png", im2)
inspect_depth(ctx, output_dir, indices="teleport", under_score=True, jet=jet)
@task
def noise_depth(ctx, directory, show=False):
import glob
import cv2
import numpy as np
def imshow_components(labels):
# Map component labels to hue val
label_hue = np.uint8(179 * labels / np.max(labels))
blank_ch = 255 * np.ones_like(label_hue)
labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])
# cvt to BGR for display
labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)
# set bg label to black
labeled_img[label_hue == 0] = 0
if show:
cv2.imshow("labeled.png", labeled_img)
cv2.waitKey()
images = glob.glob("{}/depth_*.png".format(directory))
indices = []
for image_file in images:
print(image_file)
grayscale_img = cv2.imread(image_file, 0)
img = grayscale_img
img_size = img.shape
img = cv2.threshold(img, 30, 255, cv2.THRESH_BINARY_INV)[1]
ret, labels = cv2.connectedComponents(img)
print("Components: {}".format(ret))
imshow_components(labels)
print(img_size[0])
indices_top_left = np.where(labels == labels[0][0])
indices_top_right = np.where(labels == labels[0][img_size[1] - 1])
indices_bottom_left = np.where(labels == labels[img_size[0] - 1][0])
indices_bottom_right = np.where(
labels == labels[img_size[0] - 1][img_size[1] - 1]
)
indices = [
indices_top_left,
indices_top_right,
indices_bottom_left,
indices_bottom_right,
]
blank_image = np.zeros((300, 300, 1), np.uint8)
blank_image.fill(255)
blank_image[indices_top_left] = 0
blank_image[indices_top_right] = 0
blank_image[indices_bottom_left] = 0
blank_image[indices_bottom_right] = 0
if show:
cv2.imshow("labeled.png", blank_image)
cv2.waitKey()
break
compressed = []
for indices_arr in indices:
unique_e, counts = np.unique(indices_arr[0], return_counts=True)
compressed.append(counts)
np.save("depth_noise", compressed)
@task
def release(ctx):
x = subprocess.check_output("git status --porcelain", shell=True).decode("ASCII")
for line in x.split("\n"):
if line.strip().startswith("??") or len(line.strip()) == 0:
continue
raise Exception(
"Found locally modified changes from 'git status' - please commit and push or revert"
)
import ai2thor._version
tag = "v" + ai2thor._version.__version__
subprocess.check_call('git tag -a %s -m "release %s"' % (tag, tag), shell=True)
subprocess.check_call("git push origin main --tags", shell=True)
subprocess.check_call(
"twine upload -u ai2thor dist/ai2thor-{ver}-* dist/ai2thor-{ver}.*".format(
ver=ai2thor._version.__version__
),
shell=True,
)
@task
def check_visible_objects_closed_receptacles(ctx, start_scene, end_scene):
from itertools import product
import ai2thor.controller
controller = ai2thor.controller.BFSController()
controller.start()
for i in range(int(start_scene), int(end_scene)):
print("working on floorplan %s" % i)
controller.search_all_closed("FloorPlan%s" % i)
visibility_object_id = None
visibility_object_types = ["Mug", "CellPhone", "SoapBar"]
for obj in controller.last_event.metadata["objects"]:
if obj["pickupable"]:
controller.step(
action=dict(
action="PickupObject",
objectId=obj["objectId"],
forceVisible=True,
)
)
if (
visibility_object_id is None
and obj["objectType"] in visibility_object_types
):
visibility_object_id = obj["objectId"]
if visibility_object_id is None:
raise Exception("Couldn't get a visibility_object")
bad_receptacles = set()
for point in controller.grid_points:
controller.step(
dict(action="Teleport", x=point["x"], y=point["y"], z=point["z"]),
raise_for_failure=True,
)
for rot, hor in product(controller.rotations, controller.horizons):
event = controller.step(
dict(action="RotateLook", rotation=rot, horizon=hor),
raise_for_failure=True,
)
for j in event.metadata["objects"]:
if j["receptacle"] and j["visible"] and j["openable"]:
controller.step(
action=dict(
action="Replace",
forceVisible=True,
pivot=0,
receptacleObjectId=j["objectId"],
objectId=visibility_object_id,
)
)
replace_success = controller.last_event.metadata[
"lastActionSuccess"
]
if replace_success:
if (
controller.is_object_visible(visibility_object_id)
and j["objectId"] not in bad_receptacles
):
bad_receptacles.add(j["objectId"])
print("Got bad receptacle: %s" % j["objectId"])
# import cv2
# cv2.imshow('aoeu', controller.last_event.cv2image())
# cv2.waitKey(0)
controller.step(
action=dict(
action="PickupObject",
objectId=visibility_object_id,
forceVisible=True,
)
)
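# Lists every object in an S3 bucket, following list_objects_v2 pagination via continuation
# tokens, and returns a dict mapping object key -> object metadata.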
def list_objects_with_metadata(bucket):
keys = {}
s3c = boto3.client("s3")
continuation_token = None
while True:
if continuation_token:
objects = s3c.list_objects_v2(
Bucket=bucket, ContinuationToken=continuation_token
)
else:
objects = s3c.list_objects_v2(Bucket=bucket)
for i in objects.get("Contents", []):
keys[i["Key"]] = i
if "NextContinuationToken" in objects:
continuation_token = objects["NextContinuationToken"]
else:
break
return keys
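# Quoted MD5 hex digest of a payload; this matches the ETag S3 reports for non-multipart
# uploads and is used below to skip re-uploading unchanged files.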
def s3_etag_data(data):
h = hashlib.md5()
h.update(data)
return '"' + h.hexdigest() + '"'
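# one year in seconds, used as the Cache-Control max-age / Expires horizon for cacheable assets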
cache_seconds = 31536000
@task
def webgl_deploy(
ctx,
bucket=ai2thor.build.PUBLIC_WEBGL_S3_BUCKET,
prefix="local",
source_dir="builds",
target_dir="",
verbose=False,
force=False,
extensions_no_cache="",
):
from pathlib import Path
from os.path import isfile, join, isdir
content_encoding = {".unityweb": "gzip"}
bucket_name = bucket
s3 = boto3.resource("s3")
current_objects = list_objects_with_metadata(bucket_name)
no_cache_extensions = {".txt", ".html", ".json", ".js"}
    no_cache_extensions = no_cache_extensions.union(set(extensions_no_cache.split(",")))
def walk_recursive(path, func, parent_dir=""):
for file_name in os.listdir(path):
f_path = join(path, file_name)
relative_path = join(parent_dir, file_name)
if isfile(f_path):
key = Path(join(target_dir, relative_path))
func(f_path, key.as_posix())
elif isdir(f_path):
walk_recursive(f_path, func, relative_path)
def upload_file(f_path, key):
_, ext = os.path.splitext(f_path)
if verbose:
print("'{}'".format(key))
with open(f_path, "rb") as f:
file_data = f.read()
etag = s3_etag_data(file_data)
kwargs = {}
if ext in content_encoding:
kwargs["ContentEncoding"] = content_encoding[ext]
if (
not force
and key in current_objects
and etag == current_objects[key]["ETag"]
):
if verbose:
print("ETag match - skipping %s" % key)
return
if ext in content_types:
cache = (
"no-cache, no-store, must-revalidate"
if ext in no_cache_extensions
else "public, max-age={}".format(cache_seconds)
)
now = datetime.datetime.utcnow()
expires = (
now
if ext == ".html" or ext == ".txt"
else now + datetime.timedelta(seconds=cache_seconds)
)
s3.Object(bucket_name, key).put(
Body=file_data,
ACL="public-read",
ContentType=content_types[ext],
CacheControl=cache,
Expires=expires,
**kwargs,
)
else:
if verbose:
print(
"Warning: Content type for extension '{}' not defined,"
" uploading with no content type".format(ext)
)
                s3.Object(bucket_name, key).put(Body=file_data, ACL="public-read")
if prefix is not None:
build_path = _webgl_local_build_path(prefix, source_dir)
else:
build_path = source_dir
if verbose:
print("Build path: '{}'".format(build_path))
print("Uploading...")
walk_recursive(build_path, upload_file)
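# Builds and deploys two WebGL bundles: a small curated demo (uploaded under 'demo') and a full
# build of all kitchen/living room/bedroom/bathroom/RoboTHOR scenes (uploaded under 'full').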
@task
def webgl_build_deploy_demo(ctx, verbose=False, force=False, content_addressable=False):
# Main demo
demo_selected_scene_indices = [
1,
3,
7,
29,
30,
204,
209,
221,
224,
227,
301,
302,
308,
326,
330,
401,
403,
411,
422,
430,
]
scenes = ["FloorPlan{}_physics".format(x) for x in demo_selected_scene_indices]
webgl_build(
ctx,
scenes=",".join(scenes),
directory="builds/demo",
content_addressable=content_addressable,
)
webgl_deploy(
ctx, source_dir="builds/demo", target_dir="demo", verbose=verbose, force=force
)
if verbose:
print("Deployed selected scenes to bucket's 'demo' directory")
# Full framework demo
kitchens = [f"FloorPlan{i}_physics" for i in range(1, 31)]
living_rooms = [f"FloorPlan{200 + i}_physics" for i in range(1, 31)]
bedrooms = [f"FloorPlan{300 + i}_physics" for i in range(1, 31)]
bathrooms = [f"FloorPlan{400 + i}_physics" for i in range(1, 31)]
robothor_train = [
f"FloorPlan_Train{i}_{j}" for i in range(1, 13) for j in range(1, 6)
]
robothor_val = [f"FloorPlan_Val{i}_{j}" for i in range(1, 4) for j in range(1, 6)]
scenes = (
kitchens + living_rooms + bedrooms + bathrooms + robothor_train + robothor_val
)
webgl_build(
ctx,
scenes=",".join(scenes),
content_addressable=content_addressable,
)
webgl_deploy(ctx, verbose=verbose, force=force, target_dir="full")
if verbose:
print("Deployed all scenes to bucket's root.")
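# Reads autodeploy.json from the public WebGL bucket and returns the commit id of the last
# auto-deployed demo, or None if the key doesn't exist yet.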
def current_webgl_autodeploy_commit_id():
s3 = boto3.resource("s3")
try:
res = s3.Object(ai2thor.build.PUBLIC_WEBGL_S3_BUCKET, "autodeploy.json").get()
return json.loads(res["Body"].read())["commit_id"]
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "NoSuchKey":
return None
else:
raise e
def update_webgl_autodeploy_commit_id(commit_id):
s3 = boto3.resource("s3")
s3.Object(ai2thor.build.PUBLIC_WEBGL_S3_BUCKET, "autodeploy.json").put(
Body=json.dumps(dict(timestamp=time.time(), commit_id=commit_id)),
ContentType="application/json",
)
@task
def webgl_deploy_all(ctx, verbose=False, individual_rooms=False):
rooms = {
"kitchens": (1, 30),
"livingRooms": (201, 230),
"bedrooms": (301, 330),
"bathrooms": (401, 430),
"foyers": (501, 530),
}
for key, room_range in rooms.items():
range_str = "{}-{}".format(room_range[0], room_range[1])
if verbose:
print("Building for rooms: {}".format(range_str))
build_dir = "builds/{}".format(key)
if individual_rooms:
for i in range(room_range[0], room_range[1]):
floorPlanName = "FloorPlan{}_physics".format(i)
target_s3_dir = "{}/{}".format(key, floorPlanName)
build_dir = "builds/{}".format(target_s3_dir)
webgl_build(ctx, scenes=floorPlanName, directory=build_dir)
webgl_deploy(
ctx, source_dir=build_dir, target_dir=target_s3_dir, verbose=verbose
)
else:
webgl_build(ctx, room_ranges=range_str, directory=build_dir)
webgl_deploy(ctx, source_dir=build_dir, target_dir=key, verbose=verbose)
@task
def webgl_s3_deploy(
ctx, bucket, target_dir, scenes="", verbose=False, all=False, deploy_skip=False
):
"""
Builds and deploys a WebGL unity site
    :param ctx: invoke context
    :param bucket: Target s3 bucket
    :param target_dir: Target directory in bucket
:param scenes: String of scene numbers to include in the build as a comma separated list e.g. "4,6,230"
:param verbose: verbose build
:param all: overrides 'scenes' parameter and builds and deploys all separate rooms
:param deploy_skip: Whether to skip deployment and do build only.
:return:
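
    Example invocation (hypothetical, assuming invoke's default underscore-to-hyphen task naming):
        invoke webgl-s3-deploy --bucket my-bucket --target-dir demo --scenes "1,201,301"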
"""
rooms = {
"kitchens": (1, 30),
"livingRooms": (201, 230),
"bedrooms": (301, 330),
"bathrooms": (401, 430),
}
if all:
flatten = lambda l: [item for sublist in l for item in sublist]
room_numbers = flatten(
[
[i for i in range(room_range[0], room_range[1])]
for key, room_range in rooms.items()
]
)
else:
room_numbers = [s.strip() for s in scenes.split(",")]
if verbose:
print("Rooms in build: '{}'".format(room_numbers))
for i in room_numbers:
floor_plan_name = "FloorPlan{}_physics".format(i)
if verbose:
print("Building room '{}'...".format(floor_plan_name))
target_s3_dir = "{}/{}".format(target_dir, floor_plan_name)
build_dir = "builds/{}".format(target_s3_dir)
webgl_build(
ctx, scenes=floor_plan_name, directory=build_dir, crowdsource_build=True
)
if verbose:
print("Deploying room '{}'...".format(floor_plan_name))
if not deploy_skip:
webgl_deploy(
ctx,
bucket=bucket,
source_dir=build_dir,
target_dir=target_s3_dir,
verbose=verbose,
extensions_no_cache=".css",
)
@task
def webgl_site_deploy(
context,
template_name,
output_dir,
bucket,
unity_build_dir="",
s3_target_dir="",
force=False,
verbose=False,
):
from pathlib import Path
from os.path import isfile, join
template_dir = Path("unity/Assets/WebGLTemplates/{}".format(template_name))
if os.path.exists(output_dir):
shutil.rmtree(output_dir)
# os.mkdir(output_dir)
ignore_func = lambda d, files: [
f for f in files if isfile(join(d, f)) and f.endswith(".meta")
]
if unity_build_dir != "":
shutil.copytree(unity_build_dir, output_dir, ignore=ignore_func)
# shutil.copytree(os.path.join(unity_build_dir, "Build"), os.path.join(output_dir, "Build"), ignore=ignore_func)
else:
shutil.copytree(template_dir, output_dir, ignore=ignore_func)
webgl_deploy(
context,
bucket=bucket,
prefix=None,
source_dir=output_dir,
target_dir=s3_target_dir,
verbose=verbose,
force=force,
extensions_no_cache=".css",
)
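# Smoke test for a locally running server: POSTs a MoveAhead step to port 9200, unpacks the
# msgpack payload, and displays the returned frame with OpenCV.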
@task
def mock_client_request(context):
import msgpack
import numpy as np
import requests
import cv2
r = requests.post(
"http://127.0.0.1:9200/step", json=dict(action="MoveAhead", sequenceId=1)
)
payload = msgpack.unpackb(r.content, raw=False)
metadata = payload["metadata"]["agents"][0]
image = np.frombuffer(payload["frames"][0], dtype=np.uint8).reshape(
metadata["screenHeight"], metadata["screenWidth"], 3
)
pprint.pprint(metadata)
cv2.imshow("aoeu", image)
cv2.waitKey(1000)
@task
def start_mock_real_server(context):
import ai2thor.mock_real_server
m = ai2thor.mock_real_server.MockServer(height=300, width=300)
    print("Started mock server at: http://" + m.host + ":" + str(m.port))
m.start()
@task
def create_robothor_dataset(
context,
local_build=False,
editor_mode=False,
width=300,
height=300,
output="robothor-dataset.json",
intermediate_directory=".",
visibility_distance=1.0,
objects_filter=None,
scene_filter=None,
filter_file=None,
):
"""
    Creates a dataset for the RoboTHOR challenge, written to `intermediate_directory`
    under the name given by `output` (default `robothor-dataset.json`)
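
    Example invocation (hypothetical, assuming invoke's default underscore-to-hyphen task naming):
        invoke create-robothor-dataset --local-build --intermediate-directory dataset_out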
"""
import ai2thor.controller
import ai2thor.util.metrics as metrics
scene = "FloorPlan_Train1_1"
angle = 45
gridSize = 0.25
# Restrict points visibility_multiplier_filter * visibility_distance away from the target object
visibility_multiplier_filter = 2
scene_object_filter = {}
if filter_file is not None:
with open(filter_file, "r") as f:
scene_object_filter = json.load(f)
print("Filter:")
pprint.pprint(scene_object_filter)
print("Visibility distance: {}".format(visibility_distance))
controller = ai2thor.controller.Controller(
width=width,
height=height,
local_build=local_build,
start_unity=False if editor_mode else True,
scene=scene,
port=8200,
host="127.0.0.1",
# Unity params
gridSize=gridSize,
fieldOfView=60,
rotateStepDegrees=angle,
agentMode="bot",
visibilityDistance=visibility_distance,
)
targets = [
"Apple",
"Baseball Bat",
"BasketBall",
"Bowl",
"Garbage Can",
"House Plant",
"Laptop",
"Mug",
"Remote",
"Spray Bottle",
"Vase",
"Alarm Clock",
"Television",
"Pillow",
]
failed_points = []
if objects_filter is not None:
obj_filter = set([o for o in objects_filter.split(",")])
targets = [o for o in targets if o.replace(" ", "") in obj_filter]
desired_points = 30
event = controller.step(
dict(
action="GetScenesInBuild",
)
)
scenes_in_build = event.metadata["actionReturn"]
objects_types_in_scene = set()
def sqr_dist(a, b):
x = a[0] - b[0]
z = a[2] - b[2]
return x * x + z * z
def sqr_dist_dict(a, b):
x = a["x"] - b["x"]
z = a["z"] - b["z"]
return x * x + z * z
    def get_points(controller, object_type, scene):
print("Getting points in scene: '{}'...: ".format(scene))
controller.reset(scene)
event = controller.step(
dict(
action="ObjectTypeToObjectIds", objectType=object_type.replace(" ", "")
)
)
object_ids = event.metadata["actionReturn"]
if object_ids is None or len(object_ids) > 1 or len(object_ids) == 0:
print("Object type '{}' not available in scene.".format(object_type))
return None
objects_types_in_scene.add(object_type)
object_id = object_ids[0]
event_reachable = controller.step(
dict(action="GetReachablePositions", gridSize=0.25)
)
target_position = controller.step(
action="GetObjectPosition", objectId=object_id
).metadata["actionReturn"]
reachable_positions = event_reachable.metadata["actionReturn"]
reachable_pos_set = set(
[
(pos["x"], pos["y"], pos["z"])
for pos in reachable_positions
# if sqr_dist_dict(pos, target_position) >= visibility_distance * visibility_multiplier_filter
]
)
def filter_points(selected_points, point_set, minimum_distance):
result = set()
for selected in selected_points:
if selected in point_set:
result.add(selected)
remove_set = set(
[
p
for p in point_set
if sqr_dist(p, selected)
<= minimum_distance * minimum_distance
]
)
point_set = point_set.difference(remove_set)
return result
import random
        # random.sample needs a sequence; sampling directly from a set is an error on newer Pythons
        points = random.sample(list(reachable_pos_set), desired_points * 4)
final_point_set = filter_points(points, reachable_pos_set, gridSize * 2)
print("Total number of points: {}".format(len(final_point_set)))
print("Id {}".format(event.metadata["actionReturn"]))
point_objects = []
eps = 0.0001
counter = 0
for x, y, z in final_point_set:
possible_orientations = [0, 90, 180, 270]
pos_unity = dict(x=x, y=y, z=z)
try:
path = metrics.get_shortest_path_to_object(
controller, object_id, pos_unity, {"x": 0, "y": 0, "z": 0}
)
minimum_path_length = metrics.path_distance(path)
rotation_allowed = False
while not rotation_allowed:
if len(possible_orientations) == 0:
break
                    rotation_y = random.choice(possible_orientations)
                    possible_orientations.remove(rotation_y)
evt = controller.step(
action="TeleportFull",
x=pos_unity["x"],
y=pos_unity["y"],
z=pos_unity["z"],
                        rotation=dict(x=0, y=rotation_y, z=0),
)
rotation_allowed = evt.metadata["lastActionSuccess"]
if not evt.metadata["lastActionSuccess"]:
print(evt.metadata["errorMessage"])
print(
"--------- Rotation not allowed! for pos {} rot {} ".format(
                                pos_unity, rotation_y
)
)
if minimum_path_length > eps and rotation_allowed:
                    m = re.search(r"FloorPlan_([a-zA-Z\-]*)([0-9]+)_([0-9]+)", scene)
point_id = "{}_{}_{}_{}_{}".format(
m.group(1), m.group(2), m.group(3), object_type, counter
)
                    point_objects.append(
                        {
                            "id": point_id,
                            "scene": scene,
                            "object_type": object_type,
                            "object_id": object_id,
                            "target_position": target_position,
                            "initial_position": pos_unity,
                            "initial_orientation": rotation_y,
                            "shortest_path": path,
                            "shortest_path_length": minimum_path_length,
                        }
                    )
counter += 1
except ValueError:
print("-----Invalid path discarding point...")
failed_points.append(
{
"scene": scene,
"object_type": object_type,
"object_id": object_id,
"target_position": target_position,
"initial_position": pos_unity,
}
)
sorted_objs = sorted(point_objects, key=lambda m: m["shortest_path_length"])
third = int(len(sorted_objs) / 3.0)
for i, obj in enumerate(sorted_objs):
if i < third:
level = "easy"
elif i < 2 * third:
level = "medium"
else:
level = "hard"
sorted_objs[i]["difficulty"] = level
return sorted_objs
dataset = {}
dataset_flat = []
if intermediate_directory is not None:
if intermediate_directory != ".":
if os.path.exists(intermediate_directory):
shutil.rmtree(intermediate_directory)
os.makedirs(intermediate_directory)
def key_sort_func(scene_name):
        m = re.search(r"FloorPlan_([a-zA-Z\-]*)([0-9]+)_([0-9]+)", scene_name)
return m.group(1), int(m.group(2)), int(m.group(3))
scenes = sorted(
[scene for scene in scenes_in_build if "physics" not in scene],
key=key_sort_func,
)
if scene_filter is not None:
scene_filter_set = set(scene_filter.split(","))
scenes = [s for s in scenes if s in scene_filter_set]
print("Sorted scenes: {}".format(scenes))
for scene in scenes:
dataset[scene] = {}
dataset["object_types"] = targets
objects = []
for objectType in targets:
if filter_file is None or (
objectType in scene_object_filter
and scene in scene_object_filter[objectType]
):
dataset[scene][objectType] = []
obj = get_points(controller, objectType, scene)
if obj is not None:
objects = objects + obj
dataset_flat = dataset_flat + objects
if intermediate_directory != ".":
with open(
os.path.join(intermediate_directory, "{}.json".format(scene)), "w"
) as f:
json.dump(objects, f, indent=4)
with open(os.path.join(intermediate_directory, output), "w") as f:
json.dump(dataset_flat, f, indent=4)
print("Object types in scene union: {}".format(objects_types_in_scene))
print("Total unique objects: {}".format(len(objects_types_in_scene)))
print("Total scenes: {}".format(len(scenes)))
print("Total datapoints: {}".format(len(dataset_flat)))
print(failed_points)
with open(os.path.join(intermediate_directory, "failed.json"), "w") as f:
json.dump(failed_points, f, indent=4)
@task
def shortest_path_to_object(
context,
scene,
object,
x,
z,
y=0.9103442,
rotation=0,
editor_mode=False,
local_build=False,
visibility_distance=1.0,
grid_size=0.25,
):
p = dict(x=x, y=y, z=z)
import ai2thor.controller
import ai2thor.util.metrics as metrics
angle = 45
gridSize = grid_size
controller = ai2thor.controller.Controller(
width=300,
height=300,
local_build=local_build,
start_unity=False if editor_mode else True,
scene=scene,
port=8200,
host="127.0.0.1",
# Unity params
gridSize=gridSize,
fieldOfView=60,
rotateStepDegrees=angle,
agentMode="bot",
visibilityDistance=visibility_distance,
)
path = metrics.get_shortest_path_to_object_type(
controller, object, p, {"x": 0, "y": 0, "z": 0}
)
minimum_path_length = metrics.path_distance(path)
print("Path: {}".format(path))
    print("Path length: {}".format(minimum_path_length))
@task
def filter_dataset(ctx, filename, output_filename, ids=False):
"""
    Removes every object type that has zero datapoints in at least one scene
    (i.e. is unreachable there) and re-numbers the remaining point ids
"""
with open(filename, "r") as f:
obj = json.load(f)
targets = [
"Apple",
"Baseball Bat",
"BasketBall",
"Bowl",
"Garbage Can",
"House Plant",
"Laptop",
"Mug",
"Spray Bottle",
"Vase",
"Alarm Clock",
"Television",
"Pillow",
]
counter = {}
for f in obj:
obj_type = f["object_type"]
if f["scene"] not in counter:
counter[f["scene"]] = {target: 0 for target in targets}
scene_counter = counter[f["scene"]]
if obj_type not in scene_counter:
scene_counter[obj_type] = 1
else:
scene_counter[obj_type] += 1
objects_with_zero = set()
objects_with_zero_by_obj = {}
for k, item in counter.items():
# print("Key {} ".format(k))
for obj_type, count in item.items():
# print("obj {} count {}".format(obj_type, count))
if count == 0:
if obj_type not in objects_with_zero_by_obj:
objects_with_zero_by_obj[obj_type] = set()
# print("With zero for obj: {} in scene {}".format(obj_type, k))
objects_with_zero_by_obj[obj_type].add(k)
objects_with_zero.add(obj_type)
print("Objects with zero: {}".format(objects_with_zero))
with open("with_zero.json", "w") as fw:
dict_list = {k: list(v) for k, v in objects_with_zero_by_obj.items()}
json.dump(dict_list, fw, sort_keys=True, indent=4)
pprint.pprint(objects_with_zero_by_obj)
filtered = [o for o in obj if o["object_type"] not in objects_with_zero]
counter = 0
current_scene = ""
current_object_type = ""
for i, o in enumerate(filtered):
if current_scene != o["scene"] or current_object_type != o["object_type"]:
counter = 0
current_scene = o["scene"]
current_object_type = o["object_type"]
        m = re.search(r"FloorPlan_([a-zA-Z\-]*)([0-9]+)_([0-9]+)", o["scene"])
point_id = "{}_{}_{}_{}_{}".format(
m.group(1), m.group(2), m.group(3), o["object_type"], counter
)
counter += 1
o["id"] = point_id
with open(output_filename, "w") as f:
json.dump(filtered, f, indent=4)
@task
def fix_dataset_object_types(
ctx, input_file, output_file, editor_mode=False, local_build=False
):
import ai2thor.controller
with open(input_file, "r") as f:
obj = json.load(f)
scene = "FloorPlan_Train1_1"
angle = 45
gridSize = 0.25
controller = ai2thor.controller.Controller(
width=300,
height=300,
local_build=local_build,
start_unity=False if editor_mode else True,
scene=scene,
port=8200,
host="127.0.0.1",
# Unity params
gridSize=gridSize,
fieldOfView=60,
rotateStepDegrees=angle,
agentMode="bot",
visibilityDistance=1,
)
current_scene = None
object_map = {}
for i, point in enumerate(obj):
if current_scene != point["scene"]:
print("Fixing for scene '{}'...".format(point["scene"]))
controller.reset(point["scene"])
current_scene = point["scene"]
object_map = {
o["objectType"].lower(): {
"id": o["objectId"],
"type": o["objectType"],
}
for o in controller.last_event.metadata["objects"]
}
key = point["object_type"].replace(" ", "").lower()
point["object_id"] = object_map[key]["id"]
point["object_type"] = object_map[key]["type"]
with open(output_file, "w") as fw:
json.dump(obj, fw, indent=True)
@task
def test_dataset(
ctx, filename, scenes=None, objects=None, editor_mode=False, local_build=False
):
import ai2thor.controller
import ai2thor.util.metrics as metrics
scene = "FloorPlan_Train1_1" if scenes is None else scenes.split(",")[0]
controller = ai2thor.controller.Controller(
width=300,
height=300,
local_build=local_build,
start_unity=False if editor_mode else True,
scene=scene,
port=8200,
host="127.0.0.1",
# Unity params
gridSize=0.25,
fieldOfView=60,
rotateStepDegrees=45,
agentMode="bot",
visibilityDistance=1,
)
with open(filename, "r") as f:
dataset = json.load(f)
filtered_dataset = dataset
if scenes is not None:
scene_set = set(scenes.split(","))
print("Filtering {}".format(scene_set))
filtered_dataset = [d for d in dataset if d["scene"] in scene_set]
if objects is not None:
object_set = set(objects.split(","))
print("Filtering {}".format(object_set))
filtered_dataset = [
d for d in filtered_dataset if d["object_type"] in object_set
]
current_scene = None
current_object = None
point_counter = 0
print(len(filtered_dataset))
for point in filtered_dataset:
if current_scene != point["scene"]:
current_scene = point["scene"]
print("Testing for scene '{}'...".format(current_scene))
if current_object != point["object_type"]:
current_object = point["object_type"]
point_counter = 0
print(" Object '{}'...".format(current_object))
try:
path = metrics.get_shortest_path_to_object_type(
controller,
point["object_type"],
point["initial_position"],
{"x": 0, "y": point["initial_orientation"], "z": 0},
)
path_dist = metrics.path_distance(path)
point_counter += 1
print(" Total points: {}".format(point_counter))
print(path_dist)
except ValueError:
print("Cannot find path from point")
@task
def visualize_shortest_paths(
ctx,
dataset_path,
width=600,
height=300,
editor_mode=False,
local_build=False,
scenes=None,
gridSize=0.25,
output_dir=".",
object_types=None,
):
angle = 45
import ai2thor.controller
from PIL import Image
controller = ai2thor.controller.Controller(
width=width,
height=height,
local_build=local_build,
start_unity=False if editor_mode else True,
port=8200,
host="127.0.0.1",
# Unity params
gridSize=gridSize,
fieldOfView=60,
rotateStepDegrees=angle,
agentMode="bot",
visibilityDistance=1,
)
if output_dir != "." and os.path.exists(output_dir):
shutil.rmtree(output_dir)
if output_dir != ".":
os.mkdir(output_dir)
evt = controller.step(
action="AddThirdPartyCamera",
rotation=dict(x=90, y=0, z=0),
position=dict(x=5.40, y=3.25, z=-3.0),
fieldOfView=2.25,
orthographic=True,
)
evt = controller.step(action="SetTopLevelView", topView=True)
evt = controller.step(action="ToggleMapView")
# im = Image.fromarray(evt.third_party_camera_frames[0])
# im.save(os.path.join(output_dir, "top_view.jpg"))
with open(dataset_path, "r") as f:
dataset = json.load(f)
dataset_filtered = dataset
if scenes is not None:
scene_f_set = set(scenes.split(","))
dataset_filtered = [d for d in dataset if d["scene"] in scene_f_set]
if object_types is not None:
object_f_set = set(object_types.split(","))
dataset_filtered = [
d for d in dataset_filtered if d["object_type"] in object_f_set
]
print("Running for {} points...".format(len(dataset_filtered)))
index = 0
print(index)
print(len(dataset_filtered))
datapoint = dataset_filtered[index]
current_scene = datapoint["scene"]
current_object = datapoint["object_type"]
failed = {}
while index < len(dataset_filtered):
previous_index = index
controller.reset(current_scene)
while (
current_scene == datapoint["scene"]
and current_object == datapoint["object_type"]
):
index += 1
if index > len(dataset_filtered) - 1:
break
datapoint = dataset_filtered[index]
current_scene = datapoint["scene"]
current_object = datapoint["object_type"]
key = "{}_{}".format(current_scene, current_object)
failed[key] = []
print(
"Points for '{}' in scene '{}'...".format(current_object, current_scene)
)
evt = controller.step(
action="AddThirdPartyCamera",
rotation=dict(x=90, y=0, z=0),
position=dict(x=5.40, y=3.25, z=-3.0),
fieldOfView=2.25,
orthographic=True,
)
sc = dataset_filtered[previous_index]["scene"]
obj_type = dataset_filtered[previous_index]["object_type"]
positions = [
d["initial_position"] for d in dataset_filtered[previous_index:index]
]
# print("{} : {} : {}".format(sc, obj_type, positions))
evt = controller.step(
action="VisualizeShortestPaths",
objectType=obj_type,
positions=positions,
grid=True,
)
im = Image.fromarray(evt.third_party_camera_frames[0])
im.save(os.path.join(output_dir, "{}-{}.jpg".format(sc, obj_type)))
# print("Retur {}, {} ".format(evt.metadata['actionReturn'], evt.metadata['lastActionSuccess']))
# print(evt.metadata['errorMessage'])
failed[key] = [
positions[i]
for i, success in enumerate(evt.metadata["actionReturn"])
if not success
]
pprint.pprint(failed)
@task
def fill_in_dataset(
ctx,
dataset_dir,
dataset_filename,
filter_filename,
intermediate_dir,
output_filename="filled.json",
local_build=False,
editor_mode=False,
visibility_distance=1.0,
):
import glob
import ai2thor.controller
dataset_path = os.path.join(dataset_dir, dataset_filename)
def key_sort_func(scene_name):
        m = re.search(r"FloorPlan_([a-zA-Z\-]*)([0-9]+)_([0-9]+)", scene_name)
return m.group(1), int(m.group(2)), int(m.group(3))
targets = [
"Apple",
"Baseball Bat",
"Basketball",
"Bowl",
"Garbage Can",
"House Plant",
"Laptop",
"Mug",
"Remote",
"Spray Bottle",
"Vase",
"Alarm Clock",
"Television",
"Pillow",
]
controller = ai2thor.controller.Controller(
width=300,
height=300,
local_build=local_build,
start_unity=False if editor_mode else True,
port=8200,
host="127.0.0.1",
# Unity params
gridSize=0.25,
fieldOfView=60,
rotateStepDegrees=45,
agentMode="bot",
visibilityDistance=1,
)
scenes = sorted(
[scene for scene in controller._scenes_in_build if "physics" not in scene],
key=key_sort_func,
)
missing_datapoints_by_scene = {}
partial_dataset_by_scene = {}
for scene in scenes:
missing_datapoints_by_scene[scene] = []
partial_dataset_by_scene[scene] = []
with open(dataset_path, "r") as f:
        create_robothor_dataset(
ctx,
local_build=local_build,
editor_mode=editor_mode,
output=output_filename,
intermediate_directory=intermediate_dir,
visibility_distance=visibility_distance,
)
for datapoint in filter_dataset:
missing_datapoints_by_scene[datapoint["scene"]].append(datapoint)
partial_dataset_filenames = sorted(
        glob.glob("{}/FloorPlan_*.json".format(dataset_dir))
)
    print("Merging partial per-scene datasets with missing datapoints...")
difficulty_order_map = {"easy": 0, "medium": 1, "hard": 2}
for d_filename in partial_dataset_filenames:
with open(d_filename, "r") as fp:
partial_dataset = json.load(fp)
partial_dataset[0]["scene"] = partial_dataset
final_dataset = []
for scene in scenes:
for object_type in targets:
arr = [
p for p in partial_dataset[scene] if p["object_type"] == object_type
] + [
p
for p in missing_datapoints_by_scene[scene]
if p["object_type"] == object_type
]
final_dataset = final_dataset + sorted(
arr,
key=lambda p: (
p["object_type"],
difficulty_order_map[p["difficulty"]],
),
)
@task
def test_teleport(ctx, editor_mode=False, local_build=False):
import ai2thor.controller
controller = ai2thor.controller.Controller(
rotateStepDegrees=30,
visibilityDistance=1.0,
gridSize=0.25,
port=8200,
host="127.0.0.1",
local_build=local_build,
start_unity=False if editor_mode else True,
agentType="stochastic",
continuousMode=True,
continuous=False,
snapToGrid=False,
agentMode="locobot",
scene="FloorPlan_Train1_2",
width=640,
height=480,
continus=True,
)
controller.step(action="GetReachablePositions", gridSize=0.25)
params = {
"x": 8.0,
"y": 0.924999952,
"z": -1.75,
"rotation": {"x": 0.0, "y": 240.0, "z": 0.0},
"horizon": 330.0,
}
evt = controller.step(action="TeleportFull", **params)
print("New pos: {}".format(evt.metadata["agent"]["position"]))
@task
def resort_dataset(ctx, dataset_path, output_path, editor_mode=False, local_build=True):
with open(dataset_path, "r") as f:
dataset = json.load(f)
index = 0
previous_index = 0
datapoint = dataset[index]
current_scene = datapoint["scene"]
current_object = datapoint["object_type"]
# controller.reset(current_scene)
sum_t = 0
new_dataset = []
while index < len(dataset):
previous_index = index
while (
current_scene == datapoint["scene"]
and current_object == datapoint["object_type"]
):
index += 1
if index > len(dataset) - 1:
break
datapoint = dataset[index]
current_scene = datapoint["scene"]
current_object = datapoint["object_type"]
print("Scene '{}'...".format(current_scene))
sorted_datapoints = sorted(
dataset[previous_index:index], key=lambda dp: dp["shortest_path_length"]
)
third = int(len(sorted_datapoints) / 3.0)
for i, obj in enumerate(sorted_datapoints):
if i < third:
level = "easy"
elif i < 2 * third:
level = "medium"
else:
level = "hard"
sorted_datapoints[i]["difficulty"] = level
            m = re.search(r"FloorPlan_([a-zA-Z\-]*)([0-9]+)_([0-9]+)", obj["scene"])
point_id = "{}_{}_{}_{}_{}".format(
m.group(1), m.group(2), m.group(3), obj["object_type"], i
)
sorted_datapoints[i]["id"] = point_id
sorted_datapoints[i]["difficulty"] = level
new_dataset = new_dataset + sorted_datapoints
sum_t += len(sorted_datapoints)
print("original len: {}, new len: {}".format(len(dataset), sum_t))
with open(output_path, "w") as fw:
json.dump(new_dataset, fw, indent=4)
@task
def remove_dataset_spaces(ctx, dataset_dir):
train = os.path.join(dataset_dir, "train.json")
test = os.path.join(dataset_dir, "val.json")
with open(train, "r") as f:
train_data = json.load(f)
with open(test, "r") as f:
test_data = json.load(f)
id_set = set()
for o in train_data:
o["id"] = o["id"].replace(" ", "")
id_set.add(o["id"])
print(sorted(id_set))
id_set = set()
for o in test_data:
o["id"] = o["id"].replace(" ", "")
id_set.add(o["id"])
print(sorted(id_set))
with open("train.json", "w") as fw:
json.dump(train_data, fw, indent=4, sort_keys=True)
with open("val.json", "w") as fw:
json.dump(test_data, fw, indent=4, sort_keys=True)
@task
def shortest_path_to_point(
    ctx, scene, x0, y0, z0, x1, y1, z1, editor_mode=False, local_build=False
):
import ai2thor.util.metrics as metrics
import ai2thor.controller
controller = ai2thor.controller.Controller(
rotateStepDegrees=30,
visibilityDistance=1.0,
gridSize=0.25,
port=8200,
host="127.0.0.1",
local_build=local_build,
start_unity=False if editor_mode else True,
agentType="stochastic",
continuousMode=True,
continuous=False,
snapToGrid=False,
agentMode="locobot",
scene=scene,
width=300,
height=300,
continus=True,
)
evt = metrics.get_shortest_path_to_point(
controller, dict(x=x0, y=y0, z=z0), dict(x=x1, y=y1, z=z1)
)
print(evt.metadata["lastActionSuccess"])
print(evt.metadata["errorMessage"])
@task
def reachable_pos(ctx, scene, editor_mode=False, local_build=False):
import ai2thor.controller
gridSize = 0.25
controller = ai2thor.controller.Controller(
rotateStepDegrees=30,
visibilityDistance=1.0,
gridSize=gridSize,
port=8200,
host="127.0.0.1",
local_build=local_build,
start_unity=False if editor_mode else True,
agentType="stochastic",
continuousMode=True,
continuous=False,
snapToGrid=False,
agentMode="locobot",
scene=scene,
width=300,
height=300,
continus=True,
)
    print(
        "controller.last_event Agent Pos: {}".format(
            controller.last_event.metadata["agent"]["position"]
        )
    )
evt = controller.step(action="GetReachablePositions", gridSize=gridSize)
print("After GetReachable AgentPos: {}".format(evt.metadata["agent"]["position"]))
print(evt.metadata["lastActionSuccess"])
print(evt.metadata["errorMessage"])
reachable_pos = evt.metadata["actionReturn"]
print(evt.metadata["actionReturn"])
evt = controller.step(
dict(
action="TeleportFull",
x=3.0,
y=reachable_pos[0]["y"],
z=-1.5,
rotation=dict(x=0, y=45.0, z=0),
horizon=0.0,
)
)
print("After teleport: {}".format(evt.metadata["agent"]["position"]))
@task
def get_physics_determinism(
ctx, scene="FloorPlan1_physics", agent_mode="arm", n=100, samples=100
):
import ai2thor.controller
import random
num_trials = n
width = 300
height = 300
fov = 100
def act(controller, actions, n):
for i in range(n):
action = random.choice(actions)
controller.step(dict(action=action))
controller = ai2thor.controller.Controller(
local_executable_path=None,
scene=scene,
gridSize=0.25,
width=width,
height=height,
agentMode=agent_mode,
fieldOfView=fov,
agentControllerType="mid-level",
server_class=ai2thor.fifo_server.FifoServer,
visibilityScheme="Distance",
)
from ai2thor.util.trials import trial_runner, ObjectPositionVarianceAverage
move_actions = ["MoveAhead", "MoveBack", "MoveLeft", "MoveRight"]
rotate_actions = ["RotateRight", "RotateLeft"]
look_actions = ["LookUp", "LookDown"]
all_actions = move_actions + rotate_actions + look_actions
sample_number = samples
action_tuples = [
("move", move_actions, sample_number),
("rotate", rotate_actions, sample_number),
("look", look_actions, sample_number),
("all", all_actions, sample_number),
]
for action_name, actions, n in action_tuples:
for controller, metric in trial_runner(
controller, num_trials, ObjectPositionVarianceAverage()
):
act(controller, actions, n)
print(
" actions: '{}', object_position_variance_average: {} ".format(
action_name, metric
)
)
@task
def generate_pypi_index(context):
s3 = boto3.resource("s3")
root_index = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN">
<HTML>
<BODY>
<a href="/ai2thor/index.html">/ai2thor/</a><br>
</BODY>
</HTML>
"""
s3.Object(ai2thor.build.PYPI_S3_BUCKET, "index.html").put(
Body=root_index, ACL="public-read", ContentType="text/html"
)
objects = list_objects_with_metadata(ai2thor.build.PYPI_S3_BUCKET)
links = []
for k, v in objects.items():
if k.split("/")[-1] != "index.html":
links.append('<a href="/%s">/%s</a><br>' % (k, k))
ai2thor_index = """
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0//EN">
<HTML>
<BODY>
%s
</BODY>
</HTML>
""" % "\n".join(
links
)
s3.Object(ai2thor.build.PYPI_S3_BUCKET, "ai2thor/index.html").put(
Body=ai2thor_index, ACL="public-read", ContentType="text/html"
)
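# CI wrapper around the Unity Test Framework: runs test_utf, converts the XML results into a
# pytest module, executes it, and writes a JSON summary to tmp/test_utf_results.json.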
def ci_test_utf(branch, commit_id, base_dir):
logger.info(
"running Unity Test framework testRunner for %s %s %s"
% (branch, commit_id, base_dir)
)
results_path, results_logfile = test_utf(base_dir)
class_data = generate_pytest_utf(results_path)
test_path = "tmp/test_utf.py"
with open(test_path, "w") as f:
f.write("\n".join(class_data))
proc = subprocess.run(
"pytest %s" % test_path,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
)
result = dict(
success=proc.returncode == 0,
stdout=proc.stdout.decode("ascii"),
stderr=proc.stderr.decode("ascii"),
)
with open("tmp/test_utf_results.json", "w") as f:
f.write(json.dumps(result))
logger.info("finished Unity Test framework runner for %s %s" % (branch, commit_id))
@task
def format(context):
format_py(context)
format_cs(context)
@task
def format_cs(context):
install_dotnet_format(context)
    # the following message may get emitted; it can safely be ignored:
# "Warnings were encountered while loading the workspace. Set the verbosity option to the 'diagnostic' level to log warnings"
subprocess.check_call(
".dotnet/dotnet tool run dotnet-format unity/AI2-THOR-Base.csproj -w -s",
shell=True,
)
@task
def install_dotnet_format(context, force=False):
install_dotnet(context)
base_dir = os.path.normpath(os.path.dirname(os.path.realpath(__file__)))
if not os.path.isfile(".config/dotnet-tools.json"):
command = os.path.join(base_dir, ".dotnet/dotnet") + " new tool-manifest"
subprocess.check_call(command, shell=True)
with open(".config/dotnet-tools.json") as f:
tools = json.loads(f.read())
# we may want to specify a version here in the future
if not force and "dotnet-format" in tools.get("tools", {}):
# dotnet-format already installed
return
command = os.path.join(base_dir, ".dotnet/dotnet") + " tool install dotnet-format"
subprocess.check_call(command, shell=True)
@task
def install_dotnet(context, force=False):
import requests
import stat
base_dir = os.path.normpath(os.path.dirname(os.path.realpath(__file__)))
if not force and os.path.isfile(os.path.join(base_dir, ".dotnet/dotnet")):
# dotnet already installed
return
# https://docs.microsoft.com/en-us/dotnet/core/tools/dotnet-install-script
res = requests.get("https://dot.net/v1/dotnet-install.sh")
res.raise_for_status()
target = os.path.join(base_dir, "dotnet-install.sh")
with open(target, "wb") as f:
f.write(res.content)
os.chmod(target, stat.S_IREAD | stat.S_IEXEC | stat.S_IWRITE)
env = os.environ.copy()
env["DOTNET_INSTALL_DIR"] = os.path.join(base_dir, ".dotnet")
subprocess.check_call(target, shell=True, env=env)
os.unlink(target)
@task
def format_py(context):
try:
import black
except ImportError:
raise Exception("black not installed - run pip install black")
subprocess.check_call(
"black -v -t py38 --exclude unity/ --exclude .git/ .", shell=True
)
@task
def install_unity_hub(
context, target_dir=os.path.join(os.path.expanduser("~"), "local/bin")
):
import stat
import requests
if not sys.platform.startswith("linux"):
raise Exception("Installation only support for Linux")
res = requests.get(
"https://public-cdn.cloud.unity3d.com/hub/prod/UnityHub.AppImage"
)
res.raise_for_status()
os.makedirs(target_dir, exist_ok=True)
target_path = os.path.join(target_dir, "UnityHub.AppImage")
tmp_path = target_path + ".tmp-" + str(os.getpid())
with open(tmp_path, "wb") as f:
f.write(res.content)
if os.path.isfile(target_path):
os.unlink(target_path)
os.rename(tmp_path, target_path)
os.chmod(
target_path,
stat.S_IRWXU | stat.S_IRGRP | stat.S_IXGRP | stat.S_IROTH | stat.S_IXOTH,
)
print("Installed UnityHub at %s" % target_path)
@task
def install_unity_editor(context, version=None, changeset=None):
import yaml
import re
unity_hub_path = None
if sys.platform.startswith("linux"):
unity_hub_path = os.path.join(
os.path.expanduser("~"), "local/bin/UnityHub.AppImage"
)
elif sys.platform.startswith("darwin"):
unity_hub_path = "/Applications/Unity\ Hub.app/Contents/MacOS/Unity\ Hub --"
else:
raise Exception("UnityHub CLI not supported")
if version is None:
with open("unity/ProjectSettings/ProjectVersion.txt") as pf:
project_version = yaml.load(pf.read(), Loader=yaml.FullLoader)
            m = re.match(
                r"^([^\s]+)\s+\(([a-zA-Z0-9]+)\)",
                project_version["m_EditorVersionWithRevision"],
            )
assert m, (
"Could not extract version/changeset from %s"
% project_version["m_EditorVersionWithRevision"]
)
version = m.group(1)
changeset = m.group(2)
command = "%s --headless install --version %s" % (unity_hub_path, version)
if changeset:
command += " --changeset %s" % changeset
platform_modules = dict(
linux=["mac-mono", "linux-il2cpp", "webgl"],
darwin=["mac-il2cpp", "linux-il2cpp", "linux-mono", "webgl"],
)
for m in platform_modules[sys.platform]:
command += " -m %s" % m
subprocess.check_call(command, shell=True)
@task
def generate_unity_alf(context):
    # generates Unity License Activation file for use
# with manual activation https://docs.unity3d.com/Manual/ManualActivationGuide.html
alf_path = "Unity_v%s.alf" % _unity_version()
subprocess.run(
"%s -batchmode -createManualActivationFile" % _unity_path(), shell=True
)
assert os.path.isfile(alf_path), "ALF not found at %s" % alf_path
print(
"ALF created at %s. Activate license at: https://license.unity3d.com/manual"
% alf_path
)
@task
def activate_unity_license(context, ulf_path):
assert os.path.isfile(ulf_path), "License file '%s' not found" % ulf_path
subprocess.run(
'%s -batchmode -manualLicenseFile "%s"' % (_unity_path(), ulf_path), shell=True
)
def test_utf(base_dir=None):
"""
    Runs the Unity Test Framework (PlayMode tests) for the project and returns the
    paths of the XML test results and the Unity log file; generate_pytest_utf turns
    that XML into the ai2thor/tests/test_utf.py module
"""
if base_dir is None:
base_dir = os.getcwd()
project_path = os.path.join(base_dir, "unity")
commit_id = git_commit_id()
test_results_path = os.path.join(project_path, "utf_testResults-%s.xml" % commit_id)
logfile_path = os.path.join(base_dir, "thor-testResults-%s.log" % commit_id)
command = (
"%s -runTests -testResults %s -logFile %s -testPlatform PlayMode -projectpath %s "
% (_unity_path(), test_results_path, logfile_path, project_path)
)
subprocess.call(command, shell=True, cwd=base_dir)
return test_results_path, logfile_path
def generate_pytest_utf(test_results_path):
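    """Convert Unity Test Framework XML results into a pytest module at ai2thor/tests/test_utf.py."""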
import xml.etree.ElementTree as ET
with open(test_results_path) as f:
root = ET.fromstring(f.read())
from collections import defaultdict
class_tests = defaultdict(list)
for test_case in root.findall(".//test-case"):
# print(test_case.attrib['methodname'])
class_tests[test_case.attrib["classname"]].append(test_case)
class_data = []
class_data.append(
f"""
# GENERATED BY tasks.generate_pytest_utf - DO NOT EDIT/COMMIT
import pytest
import json
import os
def test_testresults_exist():
test_results_path = "{test_results_path}"
assert os.path.isfile("{test_results_path}"), "TestResults at: {test_results_path} do not exist"
"""
)
for class_name, test_cases in class_tests.items():
test_records = []
for test_case in test_cases:
methodname = test_case.attrib["methodname"]
if test_case.attrib["result"] == "Failed":
fail_message = test_case.find("failure/message")
stack_trace = test_case.find("failure/stack-trace")
message = json.dumps(fail_message.text + " " + stack_trace.text)
test_data = f"""
def test_{methodname}(self):
pytest.fail(json.loads(r\"\"\"
{message}
\"\"\"
))
"""
else:
test_data = f"""
def test_{methodname}(self):
pass
"""
test_records.append(test_data)
test_record_data = " pass"
if test_records:
test_record_data = "\n".join(test_records)
encoded_class_name = re.sub(
r"[^a-zA-Z0-9_]", "_", re.sub("_", "__", class_name)
)
class_data.append(
f"""
class {encoded_class_name}:
{test_record_data}
"""
)
with open("ai2thor/tests/test_utf.py", "w") as f:
f.write("\n".join(class_data))
return class_data
@task
def create_room(
ctx,
file_path="unity/Assets/Resources/rooms/1.json",
editor_mode=False,
local_build=False,
):
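    """Create a room in the Procedural scene from a wall-definition JSON file, then step the agent forward."""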
import ai2thor.controller
import json
import os
print(os.getcwd())
width = 300
height = 300
fov = 100
n = 20
import os
controller = ai2thor.controller.Controller(
local_executable_path=None,
local_build=local_build,
start_unity=False if editor_mode else True,
scene="Procedural",
gridSize=0.25,
width=width,
height=height,
fieldOfView=fov,
agentControllerType="mid-level",
server_class=ai2thor.fifo_server.FifoServer,
visibilityScheme="Distance",
)
# print(
# "constoller.last_action Agent Pos: {}".format(
# controller.last_event.metadata["agent"]["position"]
# )
# )
# evt = controller.step(action="GetReachablePositions", gridSize=gridSize)
# print("After GetReachable AgentPos: {}".format(evt.metadata["agent"]["position"]))
#
# print(evt.metadata["lastActionSuccess"])
# print(evt.metadata["errorMessage"])
#
# reachable_pos = evt.metadata["actionReturn"]
#
# print(evt.metadata["actionReturn"])
print(os.getcwd())
with open(file_path, "r") as f:
obj = json.load(f)
walls = obj["walls"]
evt = controller.step(
dict(
action="CreateRoom",
walls=walls,
wallHeight=2.0,
wallMaterialId="DrywallOrange",
floorMaterialId="DarkWoodFloors",
)
)
for i in range(n):
controller.step("MoveAhead")
@task
def test_render(ctx, editor_mode=False, local_build=False):
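    """Render a procedural house from an inline template and compare the RGB/depth frames against saved debug images, if present."""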
import ai2thor.controller
import cv2
import numpy as np
print(os.getcwd())
width = 300
height = 300
fov = 45
controller = ai2thor.controller.Controller(
local_executable_path=None,
local_build=local_build,
start_unity=False if editor_mode else True,
scene="Procedural",
gridSize=0.25,
port=8200,
width=width,
height=height,
fieldOfView=fov,
agentCount=1,
renderDepthImage=True,
server_class=ai2thor.fifo_server.FifoServer,
)
image_folder_path = "debug_img"
rgb_filename = "colortest.png"
depth_filename = "depth_rawtest.npy"
img = cv2.imread(os.path.join(image_folder_path, rgb_filename))
from pprint import pprint
from ai2thor.interact import InteractiveControllerPrompt
obj = {
"id": "house_0",
"layout": """
0 0 0 0 0 0
0 2 2 2 2 0
0 2 2 2 2 0
0 1 1 1 1 0
0 1 1 1 1 0
0 0 0 0 0 0
""",
"objectsLayouts": [
"""
0 0 0 0 0 0
0 2 2 2 2 0
0 2 2 2 = 0
0 1 1 1 = 0
0 1 1 1 + 0
0 0 0 0 0 0
"""
],
"rooms": {
"1": {
"wallTemplate": {
"unlit": False,
"color": {"r": 1.0, "g": 0.0, "b": 0.0, "a": 1.0},
},
"floorTemplate": {
"roomType": "Bedroom",
"floorMaterial": "DarkWoodFloors",
},
"floorYPosition": 0.0,
"wallHeight": 3.0,
},
"2": {
"wallTemplate": {
"unlit": False,
"color": {"r": 0.0, "g": 0.0, "b": 1.0, "a": 1.0},
},
"floorTemplate": {
"roomType": "LivingRoom",
"floorMaterial": "RedBrick",
},
"floorYPosition": 0.0,
"wallHeight": 3.0,
},
},
"holes": {"=": {"room0": "1", "openness": 1.0, "assetId": "Doorway_1"}},
"objects": {"+": {"kinematic": True, "assetId": "Chair_007_1"}},
"proceduralParameters": {
"floorColliderThickness": 1.0,
"receptacleHeight": 0.7,
"skyboxId": "Sky1",
"ceilingMaterial": "ps_mat",
},
}
pprint(obj)
template = obj
evt = controller.step(action="GetHouseFromTemplate", template=template)
print(
"Action success {0}, message {1}".format(
evt.metadata["lastActionSuccess"], evt.metadata["errorMessage"]
)
)
house = evt.metadata["actionReturn"]
controller.step(action="CreateHouse", house=house)
evt = controller.step(
dict(
action="TeleportFull",
x=3.0,
y=0.9010001,
z=1.0,
rotation=dict(x=0, y=0, z=0),
horizon=0,
standing=True,
forceAction=True,
)
)
cv2.namedWindow("image2")
cv2.imshow("image2", evt.cv2img)
if img is not None:
print(f"img r {img[0][0][0]} g {img[0][0][1]} b {img[0][0][2]}")
print(
f"evt frame r {evt.cv2img[0][0][0]} g {evt.cv2img[0][0][1]} b {evt.cv2img[0][0][2]}"
)
cv2.namedWindow("image")
cv2.imshow("image", img)
print(img.shape)
print(np.allclose(evt.cv2img, img))
raw_depth = np.load(os.path.join(image_folder_path, depth_filename))
print(f"depth evt {evt.depth_frame.shape} compare {raw_depth.shape}")
print(np.allclose(evt.depth_frame, raw_depth))
dx = np.where(~np.all(evt.cv2img == img, axis=-1))
print(list(dx))
img[dx] = (255, 0, 255)
print(img[dx])
cv2.namedWindow("image-diff")
cv2.imshow("image-diff", img)
print(img.shape)
cv2.waitKey(0)
else:
cv2.waitKey(0)
InteractiveControllerPrompt.write_image(
evt, "debug_img", "test", depth_frame=True, color_frame=True
)
@task
def create_json(ctx, file_path, output=None):
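    """Convert a legacy room-definition JSON into the procedural house format, optionally writing the result to output."""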
import json
import functools
import itertools
from pprint import pprint
add = lambda x, y: x + y
sub = lambda x, y: x - y
def vec3(x, y, z):
return {"x": x, "y": y, "z": z}
def point_wise_2(v1, v2, func):
return {k: func(v1[k], v2[k]) for k in ["x", "y", "z"]}
def point_wise(v1, func):
return {k: func(v1[k]) for k in ["x", "y", "z"]}
def sum(vec):
return functools.reduce(lambda a, b: a + b, vec.values())
def sqr_dist(v1, v2):
return sum(point_wise(point_wise_2(v1, v2, sub), lambda x: x**2))
def wall_to_poly(wall):
return [
wall["p0"],
wall["p1"],
point_wise_2(wall["p1"], vec3(0, wall["height"], 0), add),
point_wise_2(wall["p0"], vec3(0, wall["height"], 0), add),
]
def walls_to_floor_poly(walls):
result = []
wall_list = list(walls)
eps = 1e-4
eps_sqr = eps**2
result.append(walls[0]["p0"])
while len(wall_list) != 0:
wall = wall_list.pop(0)
p1 = wall["p1"]
wall_list = sorted(wall_list, key=lambda w: sqr_dist(p1, w["p0"]))
if len(wall_list) != 0:
closest = wall_list[0]
dist = sqr_dist(p1, closest["p0"])
if dist < eps_sqr:
result.append(closest["p0"])
else:
return None
return result
with open(file_path, "r") as f:
obj = json.load(f)
walls = [
[
{
"id": "wall_{}_{}".format(room_i, wall_indx),
"roomId": "room_{}".format(room_i),
"material": wall["materialId"],
"empty": wall["empty"] if "empty" in wall else False,
"polygon": wall_to_poly(wall),
}
for (wall, wall_indx) in zip(
room["walls"], range(0, len(room["walls"]))
)
]
for (room, room_i) in zip(obj["rooms"], range(len(obj["rooms"])))
]
rooms = [
{
"id": "room_{}".format(room_i),
"type": "",
"floorMaterial": room["rectangleFloor"]["materialId"],
"children": [],
"ceilings": [],
"floorPolygon": walls_to_floor_poly(room["walls"]),
}
for (room, room_i) in zip(obj["rooms"], range(len(obj["rooms"])))
]
walls = list(itertools.chain(*walls))
house = {
"rooms": rooms,
"walls": walls,
"proceduralParameters": {
"ceilingMaterial": obj["ceilingMaterialId"],
"floorColliderThickness": 1.0,
"receptacleHeight": 0.7,
"skyboxId": "Sky1",
"lights": [],
},
}
pprint(house)
if output is not None:
with open(output, "w") as fw:
json.dump(house, fw, indent=4, sort_keys=True)
@task
def spawn_obj_test(ctx, file_path, room_id, editor_mode=False, local_build=False):
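    """Create a single room from a house JSON and randomly spawn an object into a receptacle within it."""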
import ai2thor.controller
import json
import os
import time
print(os.getcwd())
width = 300
height = 300
fov = 100
n = 20
import os
from pprint import pprint
controller = ai2thor.controller.Controller(
local_executable_path=None,
local_build=local_build,
start_unity=False if editor_mode else True,
scene="Procedural",
gridSize=0.25,
width=width,
height=height,
fieldOfView=fov,
agentControllerType="mid-level",
server_class=ai2thor.fifo_server.FifoServer,
visibilityScheme="Distance",
)
# print(
# "constoller.last_action Agent Pos: {}".format(
# controller.last_event.metadata["agent"]["position"]
# )
# )
# evt = controller.step(action="GetReachablePositions", gridSize=gridSize)
# print("After GetReachable AgentPos: {}".format(evt.metadata["agent"]["position"]))
#
# print(evt.metadata["lastActionSuccess"])
# print(evt.metadata["errorMessage"])
#
# reachable_pos = evt.metadata["actionReturn"]
#
# print(evt.metadata["actionReturn"])
print(os.getcwd())
with open(file_path, "r") as f:
obj = json.load(f)
obj["walls"] = [wall for wall in obj["walls"] if wall["roomId"] == room_id]
obj["rooms"] = [room for room in obj["rooms"] if room["id"] == room_id]
obj["objects"] = []
pprint(obj)
evt = controller.step(dict(action="CreateHouseFromJson", house=obj))
evt = controller.step(
dict(
action="TeleportFull",
x=4.0,
y=0.9010001,
z=4.0,
rotation=dict(x=0, y=0, z=0),
horizon=30,
standing=True,
forceAction=True,
)
)
# dict("axis" = dict(x=0, y=1.0, z=0), "degrees": 90)
# SpawnObjectInReceptacleRandomly(string objectId, string prefabName, string targetReceptacle, AxisAngleRotation rotation)
evt = controller.step(
dict(
action="SpawnObjectInReceptacleRandomly",
objectId="table_1",
prefabName="Coffee_Table_211_1",
targetReceptacle="Floor|+00.00|+00.00|+00.00",
rotation=dict(axis=dict(x=0, y=1.0, z=0), degrees=90),
)
)
print(evt.metadata["lastActionSuccess"])
print(evt.metadata["errorMessage"])
# this is what you need
object_position = evt.metadata["actionReturn"]
print(object_position)
for i in range(n):
controller.step("MoveAhead")
time.sleep(0.4)
for j in range(6):
controller.step("RotateRight")
time.sleep(0.7)
@task
def plot(
ctx,
    benchmark_filenames,
plot_titles=None,
x_label="Rooms",
y_label="Actions Per Second",
title="Procedural Benchmark",
last_ithor=False,
output_filename="benchmark",
width=9.50,
height=7.5,
action_breakdown=False,
):
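    """Plot actions-per-second results from one or more benchmark JSON files and save the figure as a PNG."""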
import matplotlib.pyplot as plt
from functools import reduce
from matplotlib.lines import Line2D
filter = "all"
def get_data(benchmark, filter="all"):
keys = list(benchmark["scenes"].keys())
if filter is not None:
y = [benchmark["scenes"][m][filter] for m in keys]
else:
y = [benchmark["scenes"][m] for m in keys]
return keys, y
def load_benchmark_filename(filename):
with open(filename) as f:
return json.load(f)
def get_benchmark_title(benchmark, default_title=""):
if "title" in benchmark:
return benchmark["title"]
else:
return default_title
    benchmark_filenames = benchmark_filenames.split(",")
# markers = ["o", "*", "^", "+", "~"]
markers = list(Line2D.markers.keys())
# remove empty marker
markers.pop(1)
benchmarks = [load_benchmark_filename(filename) for filename in benchmark_filenames]
benchmark_titles = [
get_benchmark_title(b, "")
for (i, b) in zip(range(0, len(benchmarks)), benchmarks)
]
if plot_titles is not None:
titles = plot_titles.split(",")
else:
titles = [""] * len(benchmark_titles)
plot_titles = [
benchmark_titles[i] if title == "" else title
for (i, title) in zip(range(0, len(titles)), titles)
]
filter = "all" if not action_breakdown else None
all_data = [get_data(b, filter) for b in benchmarks]
if action_breakdown:
plot_titles = reduce(
list.__add__,
[
["{} {}".format(title, action) for action in all_data[0][1][0]]
for title in plot_titles
],
)
all_data = reduce(
list.__add__,
[
[(x, [y[action] for y in b]) for action in all_data[0][1][0]]
for (x, b) in all_data
],
)
keys = [k for (k, y) in all_data]
y = [y for (k, y) in all_data]
min_key_number = min(keys)
ax = plt.gca()
plt.rcParams["figure.figsize"] = [width, height]
plt.rcParams["figure.autolayout"] = True
fig = plt.figure()
for i, (x, y) in zip(range(0, len(all_data)), all_data):
marker = markers[i] if i < len(markers) else "*"
ithor_datapoint = last_ithor and i == len(all_data) - 1
x_a = all_data[i - 1][0] if ithor_datapoint else x
plt.plot(
[x_s.split("/")[-1].split("_")[0] for x_s in x_a],
y,
marker=marker,
label=plot_titles[i],
)
if ithor_datapoint:
for j in range(len(x)):
print(j)
print(x[j])
plt.annotate(x[j].split("_")[0], (j, y[j] + 0.2))
plt.xlabel(x_label)
plt.ylabel(y_label)
plt.title(title)
plt.legend()
plt.savefig("{}.png".format(output_filename.replace(".png", "")))
@task
def run_benchmark(
ctx,
width=600,
height=600,
fov=45,
editor_mode=False,
out="benchmark.json",
verbose=False,
local_build=False,
number_samples=100,
gridSize=0.25,
scenes=None,
house_json_path=None,
filter_object_types="",
teleport_random_before_actions=False,
commit_id=ai2thor.build.COMMIT_ID,
distance_visibility_scheme=False,
title="",
):
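    """Benchmark move/rotate/look action throughput with UnityActionBenchmarkRunner and write the results to the output file."""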
import json
import os
import ai2thor.wsgi_server as ws
import ai2thor.fifo_server as fs
path = os.path.abspath(fs.__file__)
print(path)
import ai2thor.benchmarking as benchmark
args = dict(
local_executable_path=None,
local_build=local_build,
start_unity=False if editor_mode else True,
commit_id=commit_id,
gridSize=0.25,
width=width,
height=height,
fieldOfView=fov,
server_type=ai2thor.wsgi_server.WsgiServer.server_type,
visibilityScheme="Distance" if distance_visibility_scheme else "Collider",
)
if editor_mode:
args["port"] = 8200
args["start_unity"] = False
elif local_build:
args["local_build"] = local_build
else:
args["commit_id"] = commit_id
house_json_paths = []
if house_json_path:
house_json_paths = house_json_path.split(",")
houses = []
for house_json_filename in house_json_paths:
with open(house_json_filename, "r") as f:
            houses.append(json.load(f))
if scenes:
scenes = scenes.split(",")
else:
scenes = []
if "Procedural" not in scenes and len(house_json_paths):
scenes.append("Procedural")
runner = benchmark.UnityActionBenchmarkRunner(
benchmarker_class_names=["SimsPerSecondBenchmarker"],
init_params=args,
name=title,
scenes=scenes,
procedural_houses=houses,
action_sample_count=number_samples,
experiment_sample_count=1,
filter_object_types=filter_object_types,
teleport_random_before_actions=teleport_random_before_actions,
verbose=verbose,
output_file=out,
)
result = runner.benchmark(
{
"move": {
"actions": [
{"action": "MoveAhead", "args": {}},
{"action": "MoveBack", "args": {}},
{"action": "MoveLeft", "args": {}},
{"action": "MoveRight", "args": {}},
],
"sample_count": number_samples, # optional field, will be added from action_sample_count param if not present
"selector": "random",
},
"rotate": {
"actions": [
{"action": "RotateRight", "args": {}},
{"action": "RotateLeft", "args": {}},
],
"sample_count": number_samples,
"selector": "random",
},
"look": {
"actions": [
{"action": "LookUp", "args": {}},
{"action": "LookDown", "args": {}},
],
"sample_count": number_samples,
"selector": "random",
},
"all": {
"actions": [
{"action": "MoveAhead", "args": {}},
{"action": "MoveBack", "args": {}},
{"action": "MoveLeft", "args": {}},
{"action": "MoveRight", "args": {}},
{"action": "RotateRight", "args": {}},
{"action": "RotateLeft", "args": {}},
{"action": "LookUp", "args": {}},
{"action": "LookDown", "args": {}},
],
"sample_count": number_samples,
"selector": "random",
},
}
)
with open(out, "w") as f:
        json.dump(result, f, indent=4, sort_keys=True)
@task
def run_benchmark_from_s3_config(ctx):
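    """Run every benchmark job config found in the benchmarking S3 bucket and upload the aggregated results."""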
import copy
from datetime import datetime, timezone
from ai2thor.benchmarking import BENCHMARKING_S3_BUCKET, UnityActionBenchmarkRunner
client = boto3.client("s3")
response = client.list_objects_v2(
Bucket=BENCHMARKING_S3_BUCKET, Prefix="benchmark_jobs/"
)
s3 = boto3.resource("s3", region_name="us-west-2")
benchmark_runs = []
for content in response.get("Contents", []):
key = content["Key"]
if key.split(".")[-1] == "json":
print(f"Key: {key}")
obj = s3.Object(BENCHMARKING_S3_BUCKET, content["Key"])
benchmark_run_config = json.loads(obj.get()["Body"].read().decode("utf-8"))
procedural_houses_transformed = []
if "procedural_houses" in benchmark_run_config:
for procedural_house in benchmark_run_config["procedural_houses"]:
if isinstance(procedural_house, str):
house_obj = s3.Object(
BENCHMARKING_S3_BUCKET,
f"procedural_houses/{procedural_house}",
)
house_json = json.loads(
house_obj.get()["Body"].read().decode("utf-8")
)
if "id" not in house_json:
house_json["id"] = procedural_house.split(".")[0]
procedural_houses_transformed.append(house_json)
elif isinstance(procedural_house, dict):
procedural_houses_transformed.append(procedural_house)
benchmark_run_config["procedural_houses"] = procedural_houses_transformed
benchmark_run_config["config_name"] = os.path.basename(key)
# benchmark_run_config['verbose'] = True
action_groups = copy.deepcopy(benchmark_run_config["action_groups"])
del benchmark_run_config["action_groups"]
benchmark_runs.append(
(UnityActionBenchmarkRunner(**benchmark_run_config), action_groups)
)
report_nowutc = datetime.now(timezone.utc)
report_name = f"{round(report_nowutc.timestamp())}_benchmark.json"
benchmark_results = []
for benchmark_runner, action_group in benchmark_runs:
benchmark_nowutc = datetime.now(timezone.utc)
benchmark_result = benchmark_runner.benchmark(action_group)
benchmark_result["datetime_utc"] = str(report_nowutc)
benchmark_results.append(benchmark_result)
try:
logger.info(f"Pushing benchmark result '{report_name}'")
s3.Object(BENCHMARKING_S3_BUCKET, f"benchmark_results/{report_name}").put(
Body=json.dumps(
dict(
timestamp_utc=round(report_nowutc.timestamp()),
benchmark_results=benchmark_results,
),
indent=4,
sort_keys=True,
),
ContentType="application/json",
)
except botocore.exceptions.ClientError as e:
logger.error(f"Caught error uploading archive '{report_name}': {e}")
s3_aggregate_benchmark_results(ctx)
# TODO remove older benchmarks
@task
def run_benchmark_from_local_config(
ctx, config_path, house_from_s3=True, houses_path="./unity/Assets/Resources/rooms"
):
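    """Run a benchmark from a local config file, loading procedural houses either locally or from S3."""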
import copy
from ai2thor.benchmarking import BENCHMARKING_S3_BUCKET, UnityActionBenchmarkRunner
client = boto3.client("s3")
response = client.list_objects_v2(
Bucket=BENCHMARKING_S3_BUCKET, Prefix="benchmark_jobs/"
)
s3 = boto3.resource("s3", region_name="us-west-2")
benchmark_runs = []
key = config_path
if key.split(".")[-1] == "json":
print(key)
procedural_houses_transformed = []
with open(config_path, "r") as f:
benchmark_run_config = json.load(f)
if "procedural_houses" in benchmark_run_config:
if not house_from_s3:
for procedural_house in benchmark_run_config["procedural_houses"]:
if isinstance(procedural_house, str):
house_path = os.path.join(houses_path, procedural_house)
with open(house_path, "r") as f:
house_json = json.load(f)
house_json["id"] = procedural_house
procedural_houses_transformed.append(house_json)
elif isinstance(procedural_house, dict):
procedural_houses_transformed.append(procedural_house)
else:
for procedural_house in benchmark_run_config["procedural_houses"]:
if isinstance(procedural_house, str):
house_obj = s3.Object(
BENCHMARKING_S3_BUCKET,
f"procedural_houses/{procedural_house}",
)
house_json = json.loads(
house_obj.get()["Body"].read().decode("utf-8")
)
if "id" not in house_json:
house_json["id"] = procedural_house.split(".")[0]
procedural_houses_transformed.append(house_json)
elif isinstance(procedural_house, dict):
procedural_houses_transformed.append(procedural_house)
benchmark_run_config["procedural_houses"] = procedural_houses_transformed
# benchmark_run_config['verbose'] = True
action_groups = copy.deepcopy(benchmark_run_config["action_groups"])
del benchmark_run_config["action_groups"]
benchmark_runs.append(
(UnityActionBenchmarkRunner(**benchmark_run_config), action_groups)
)
benchmark_results = []
for benchmark_runner, action_group in benchmark_runs:
benchmark_result = benchmark_runner.benchmark(action_group)
benchmark_results.append(benchmark_result)
print(benchmark_result)
with open("out_3.json", "w") as f:
json.dump(benchmark_results[0], f, indent=4)
# TODO remove older benchmarks
@task
def add_daily_benchmark_config(ctx, benchmark_config_filename):
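    """Upload a benchmark config JSON to the benchmark_jobs/ prefix of the benchmarking S3 bucket."""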
import json
import os
from ai2thor.benchmarking import BENCHMARKING_S3_BUCKET
path = os.path.dirname(os.path.realpath(__file__))
benchmark_config_basename = os.path.basename(benchmark_config_filename)
print(path)
benchmarking_config_schema = None
s3 = boto3.resource("s3", region_name="us-west-2")
with open(
os.path.join(path, "ai2thor", "benchmarking", "benchmark_config_schema.json"),
"r",
) as f:
benchmarking_config_schema = json.load(f)
with open(benchmark_config_filename, "r") as f:
benchmark_config = json.load(f)
# Validation broken, giving false negative
# validate(benchmark_config, schema=benchmarking_config_schema)
try:
logger.info(f"Pushing benchmark config '{benchmark_config_basename}'")
s3.Object(
BENCHMARKING_S3_BUCKET, f"benchmark_jobs/{benchmark_config_basename}"
).put(
Body=json.dumps(benchmark_config, indent=4),
ContentType="application/json",
)
except botocore.exceptions.ClientError as e:
logger.error(
f"Caught error uploading archive '{benchmark_config_basename}': {e}"
)
@task
def s3_aggregate_benchmark_results(ctx):
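    """Aggregate all benchmark result JSONs in the S3 bucket into benchmark_results/history.json."""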
from ai2thor.benchmarking import BENCHMARKING_S3_BUCKET
bucket = BENCHMARKING_S3_BUCKET
client = boto3.client("s3")
response = client.list_objects_v2(Bucket=bucket, Prefix="benchmark_results/")
s3 = boto3.resource("s3", region_name="us-west-2")
history = []
for content in response.get("Contents", []):
key = content["Key"]
if key.split(".")[-1] == "json" and os.path.basename(key) != "history.json":
print(f"Key: {key}")
obj = s3.Object(bucket, content["Key"])
benchmark_run = json.loads(obj.get()["Body"].read().decode("utf-8"))
history.append(benchmark_run)
history_name = "history.json"
try:
logger.info(f"Pushing benchmark result '{history_name}'")
s3.Object(bucket, f"benchmark_results/{history_name}").put(
Body=json.dumps(history, indent=4),
ContentType="application/json",
)
except botocore.exceptions.ClientError as e:
logger.error(f"Caught error uploading archive '{report_name}': {e}")
@task
def test_create_prefab(ctx, json_path):
import json
import ai2thor.controller
controller = ai2thor.controller.Controller(
local_executable_path=None,
local_build=True,
# commit_id="2853447e90775ca4e6714ab4a6a8d4a1e36524e9",
start_unity=True,
scene="Procedural",
gridSize=0.25,
width=300,
height=300,
server_class=ai2thor.fifo_server.FifoServer,
visibilityScheme="Distance",
)
with open(json_path, "r") as f:
create_prefab_action = json.load(f)
print(f"build dirs: {controller._build.base_dir} {controller._build.tmp_dir}")
evt = controller.step(**(create_prefab_action[0]))
print(f"Action success: {evt.metadata['lastActionSuccess']}")
print(f'Error: {evt.metadata["errorMessage"]}')
print(f"ActionReturn: {evt.metadata['actionReturn']}")
@task
def procedural_asset_hook_test(ctx, asset_dir, house_path, asset_id=""):
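    """Smoke-test ProceduralAssetHookRunner by creating a house that references an asset from asset_dir."""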
import json
import ai2thor.controller
from ai2thor.hooks.procedural_asset_hook import ProceduralAssetHookRunner
hook_runner = ProceduralAssetHookRunner(
asset_directory=asset_dir, asset_symlink=True, verbose=True
)
controller = ai2thor.controller.Controller(
# local_executable_path="unity/builds/thor-OSXIntel64-local/thor-OSXIntel64-local.app/Contents/MacOS/AI2-THOR",
local_build=True,
# commit_id="3a4efefd5de1f2d455bd11c3d53da020c7a76f3b",
start_unity=True,
scene="Procedural",
gridSize=0.25,
width=300,
height=300,
server_class=ai2thor.fifo_server.FifoServer,
visibilityScheme="Distance",
action_hook_runner=hook_runner,
)
with open(house_path, "r") as f:
house = json.load(f)
instance_id = "asset_0"
if asset_id != "":
house["objects"] = [
{
"assetId": asset_id,
"id": instance_id,
"kinematic": True,
"position": {"x": 0, "y": 0, "z": 0},
"rotation": {"x": 0, "y": 0, "z": 0},
"layer": "Procedural2",
"material": None,
}
]
evt = controller.step(action="CreateHouse", house=house)
print(
f"Action {controller.last_action['action']} success: {evt.metadata['lastActionSuccess']}"
)
print(f'Error: {evt.metadata["errorMessage"]}')
evt = controller.step(dict(action="LookAtObjectCenter", objectId=instance_id))
print(
f"Action {controller.last_action['action']} success: {evt.metadata['lastActionSuccess']}"
)
print(f'Error: {evt.metadata["errorMessage"]}')
@task
def procedural_asset_cache_test(
ctx, asset_dir, house_path, asset_ids="", cache_limit=1
):
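    """Exercise the procedural asset LRU cache by creating houses from each half of asset_ids and printing the cache keys."""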
import json
import ai2thor.controller
from ai2thor.hooks.procedural_asset_hook import ProceduralAssetHookRunner
hook_runner = ProceduralAssetHookRunner(
        asset_directory=asset_dir, asset_symlink=True, verbose=True, asset_limit=cache_limit
)
controller = ai2thor.controller.Controller(
# local_executable_path="unity/builds/thor-OSXIntel64-local/thor-OSXIntel64-local.app/Contents/MacOS/AI2-THOR",
# local_build=True,
# commit_id="3a4efefd5de1f2d455bd11c3d53da020c7a76f3b",
start_unity=False,
port=8200,
scene="Procedural",
gridSize=0.25,
width=300,
height=300,
server_class=ai2thor.wsgi_server.WsgiServer,
visibilityScheme="Distance",
action_hook_runner=hook_runner,
)
asset_ids = asset_ids.split(",")
with open(house_path, "r") as f:
house = json.load(f)
instance_id = "asset_0"
house["objects"] = []
middle = int(len(asset_ids) / 2)
i = 0
# print(asset_ids[:middle])
for asset_id in asset_ids[middle:]:
house["objects"].append(
{
"assetId": asset_id,
"id": f"{instance_id}_{i}",
"kinematic": True,
"position": {"x": 0, "y": 0, "z": i * 10},
"rotation": {"x": 0, "y": 0, "z": 0},
"layer": "Procedural2",
"material": None,
}
)
i += 1
evt = controller.step(action="CreateHouse", house=house)
print(
f"Action {controller.last_action['action']} success: {evt.metadata['lastActionSuccess']}"
)
print(f'Error: {evt.metadata["errorMessage"]}')
evt = controller.step(
dict(action="LookAtObjectCenter", objectId=f"{instance_id}_0")
)
# while True:
# pass
print(
f"Action {controller.last_action['action']} success: {evt.metadata['lastActionSuccess']}"
)
print(f'Error: {evt.metadata["errorMessage"]}')
evt = controller.step(action="GetLRUCacheKeys")
print(
f"Action {controller.last_action['action']} success: {evt.metadata['lastActionSuccess']}"
)
print(f'Error: {evt.metadata["errorMessage"]}')
print(f'return {evt.metadata["actionReturn"]}')
controller.reset()
print(f"mid: {middle} ")
# house["objects"] = house["objects"][middle:]
i = 0
# print(asset_ids[:middle])
for asset_id in asset_ids[:middle]:
house["objects"].append(
{
"assetId": asset_id,
"id": f"{instance_id}_{i}",
"kinematic": True,
"position": {"x": 0, "y": 0, "z": i * 10},
"rotation": {"x": 0, "y": 0, "z": 0},
"layer": "Procedural2",
"material": None,
}
)
i += 1
evt = controller.step(action="CreateHouse", house=house)
print(
f"Action {controller.last_action['action']} success: {evt.metadata['lastActionSuccess']}"
)
print(f'Error: {evt.metadata["errorMessage"]}')
controller.reset()
evt = controller.step(action="GetLRUCacheKeys")
print(
f"Action {controller.last_action['action']} success: {evt.metadata['lastActionSuccess']}"
)
print(f'Error: {evt.metadata["errorMessage"]}')
print(f'return {evt.metadata["actionReturn"]}')
| ai2thor-main | tasks.py |
from setuptools import setup, find_packages
import os
__version__ = "0.0.1"
if os.path.isfile("ai2thor/_version.py"):
exec(open("ai2thor/_version.py").read())
this_directory = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(this_directory, "README.md"), encoding="utf-8") as f:
long_description = f.read()
VERSION = __version__
setup(
name="ai2thor",
version=VERSION,
description="AI2-THOR: A Near Photo-Realistic Interactable Framework for Embodied AI Agents",
long_description=long_description,
license="Apache License 2.0",
long_description_content_type="text/markdown",
classifiers=[
"Intended Audience :: Science/Research",
"Development Status :: 5 - Production/Stable",
"License :: OSI Approved :: Apache Software License",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Programming Language :: Python",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"Operating System :: MacOS",
"Operating System :: Unix",
],
keywords="AI2-THOR, Allen AI, Python, Reinforcement Learning, Computer Vision, Artificial Intelligence",
url="https://github.com/allenai/ai2thor",
author="Allen Institute for AI",
author_email="[email protected]",
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
install_requires=[
"flask",
"numpy",
"pyyaml",
"requests",
"progressbar2",
"botocore",
"aws-requests-auth",
"msgpack",
"Pillow",
"python-xlib",
"opencv-python",
"werkzeug>=0.15.0", # needed for unix socket support
],
setup_requires=["pytest-runner"],
tests_require=["pytest", "pytest-timeout", "pytest-cov", "jsonschema", "shapely", "pytest-mock", "dictdiffer"],
scripts=["scripts/ai2thor-xorg"],
include_package_data=False,
)
| ai2thor-main | setup.py |
from aws_requests_auth.boto_utils import BotoAWSRequestsAuth
import os
import requests
import json
from ai2thor.util import makedirs
import ai2thor.downloader
import zipfile
import logging
from ai2thor.util.lock import LockSh, LockEx
from ai2thor.util import atomic_write
import io
from ai2thor.platform import STR_PLATFORM_MAP, OSXIntel64, Linux64, CloudRendering, StandaloneWindows64
import platform
logger = logging.getLogger(__name__)
PUBLIC_S3_BUCKET = "ai2-thor-public"
PUBLIC_WEBGL_S3_BUCKET = "ai2-thor-webgl-public"
PRIVATE_S3_BUCKET = "ai2-thor-private"
PYPI_S3_BUCKET = "ai2-thor-pypi"
TEST_OUTPUT_DIRECTORY = "../../images-debug"
LOCAL_BUILD_COMMIT_ID = "local"
AUTO_BUILD_PLATFORMS = [OSXIntel64, Linux64, CloudRendering]
COMMIT_ID = None
try:
import ai2thor._builds
COMMIT_ID = ai2thor._builds.COMMIT_ID
except ImportError:
pass
def build_name(arch, commit_id, include_private_scenes=False):
if include_private_scenes:
return "thor-private-%s-%s" % (arch, commit_id)
else:
return "thor-%s-%s" % (arch, commit_id)
def boto_auth():
return BotoAWSRequestsAuth(
aws_host="s3-us-west-2.amazonaws.com", aws_region="us-west-2", aws_service="s3"
)
base_url = "http://s3-us-west-2.amazonaws.com/%s/" % PUBLIC_S3_BUCKET
private_base_url = "http://s3-us-west-2.amazonaws.com/%s/" % PRIVATE_S3_BUCKET
# dummy build when connecting to the editor
class EditorBuild(object):
def __init__(self):
# assuming that an external build supports both server types
self.server_types = ["FIFO", "WSGI"]
self.url = None
self.unity_proc = None
external_system_platforms = dict(Linux=Linux64, Darwin=OSXIntel64, Windows=StandaloneWindows64)
self.platform = external_system_platforms[platform.system()]
def download(self):
pass
def unlock(self):
pass
def lock_sh(self):
pass
class ExternalBuild(object):
def __init__(self, executable_path):
self.executable_path = executable_path
external_system_platforms = dict(Linux=Linux64, Darwin=OSXIntel64)
self.platform = external_system_platforms[platform.system()]
# assuming that an external build supports both server types
self.server_types = ["FIFO", "WSGI"]
def download(self):
pass
def unlock(self):
pass
def lock_sh(self):
pass
class Build(object):
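    """A downloadable Unity build identified by platform and commit id, managed under the releases directory."""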
def __init__(self, platform, commit_id, include_private_scenes, releases_dir=None):
if type(platform) is str:
if platform in STR_PLATFORM_MAP:
platform = STR_PLATFORM_MAP[platform]
else:
raise ValueError("Invalid platform: %s" % platform)
self.platform = platform
self.commit_id = commit_id
self.include_private_scenes = include_private_scenes
self.releases_dir = releases_dir
self.tmp_dir = None
if self.releases_dir:
            self.tmp_dir = os.path.normpath(self.releases_dir + "/../tmp")
        # lock handle set by lock_sh(); initialized so unlock() is a no-op before locking
        self._lock = None
def download(self):
makedirs(self.releases_dir)
makedirs(self.tmp_dir)
with LockEx(os.path.join(self.tmp_dir, self.name)):
if not os.path.isdir(self.base_dir):
z = self.zipfile()
# use tmpdir instead or a random number
extract_dir = os.path.join(self.tmp_dir, self.name)
logger.debug("Extracting zipfile %s" % os.path.basename(self.url))
z.extractall(extract_dir)
os.rename(extract_dir, self.base_dir)
# This can be removed after migrating OSXIntel64 builds to have the AI2Thor executable
if os.path.exists(self.old_executable_path) and not os.path.exists(
self.executable_path
):
os.link(self.old_executable_path, self.executable_path)
# we can lose the executable permission when unzipping a build
os.chmod(self.executable_path, 0o755)
else:
logger.debug("%s exists - skipping download" % (self.executable_path,))
def zipfile(self):
zip_data = ai2thor.downloader.download(
self.url, self.sha256(), self.include_private_scenes
)
return zipfile.ZipFile(io.BytesIO(zip_data))
def download_metadata(self):
# this can happen if someone has an existing release without the metadata
# built prior to the backfill
# can add check to see if metadata has expired/we update metadata
# if we want to add more info to metadata
if not self.metadata:
z = self.zipfile()
atomic_write(self.metadata_path, z.read("metadata.json"))
@property
def base_dir(self):
return os.path.join(self.releases_dir, self.name)
@property
def old_executable_path(self):
return self.platform.old_executable_path(self.base_dir, self.name)
@property
def executable_path(self):
return self.platform.executable_path(self.base_dir, self.name)
@property
def metadata_path(self):
return os.path.join(self.base_dir, "metadata.json")
@property
def metadata(self):
if os.path.isfile(self.metadata_path):
with open(self.metadata_path, "r") as f:
return json.loads(f.read())
else:
return None
@property
def server_types(self):
self.download_metadata()
return self.metadata.get("server_types", [])
def auth(self):
if self.include_private_scenes:
return boto_auth()
else:
return None
def _base_url(self):
if self.include_private_scenes:
return private_base_url
else:
return base_url
@property
def url(self):
return self._base_url() + os.path.join("builds", self.name + ".zip")
@property
def name(self):
return build_name(
self.platform.name(), self.commit_id, self.include_private_scenes
)
def unlock(self):
if self._lock:
self._lock.unlock()
self._lock = None
def lock_sh(self):
self._lock = LockSh(self.base_dir)
self._lock.lock()
@property
def log_url(self):
return os.path.splitext(self.url)[0] + ".log"
@property
def metadata_url(self):
return os.path.splitext(self.url)[0] + ".json"
@property
def sha256_url(self):
return os.path.splitext(self.url)[0] + ".sha256"
def exists(self):
return (self.releases_dir and os.path.isdir(self.base_dir)) or (
requests.head(self.url, auth=self.auth()).status_code == 200
)
def log_exists(self):
return requests.head(self.log_url, auth=self.auth()).status_code == 200
def sha256(self):
res = requests.get(self.sha256_url, auth=self.auth())
res.raise_for_status()
return res.content.decode("ascii")
| ai2thor-main | ai2thor/build.py |
# Copyright Allen Institute for Artificial Intelligence 2017
"""
ai2thor.server
Handles all communication with Unity through a Flask service. Messages
are sent to the controller using a pair of request/response queues.
"""
import json
import subprocess
import warnings
from abc import abstractmethod, ABC
from collections.abc import Mapping
from enum import Enum
from typing import Optional, Tuple, Dict, cast, List, Set
import numpy as np
from ai2thor.util.depth import apply_real_noise, generate_noise_indices
class NumpyAwareEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
if isinstance(obj, np.generic):
return obj.item()
return super(NumpyAwareEncoder, self).default(obj)
class LazyMask(Mapping):
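    """Read-only mapping from a name to a boolean segmentation mask, computed lazily on first access."""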
def __contains__(self, key: object) -> bool:
return self.mask(cast(str, key)) is not None
def __getitem__(self, key: str):
m = self.mask(key)
if m is None:
raise KeyError(key)
return m
@abstractmethod
def mask(self, key: str, default: Optional[np.ndarray]=None) -> Optional[np.ndarray]:
pass
@abstractmethod
def _load_all(self):
pass
def __iter__(self):
self._load_all()
return iter(self._masks)
def __len__(self):
self._load_all()
return len(self._masks)
class LazyInstanceSegmentationMasks(LazyMask):
def __init__(self, image_ids_data: bytes, metadata: dict):
self._masks: Dict[str, np.ndarray] = {}
self._loaded = False
screen_width = metadata["screenWidth"]
screen_height = metadata["screenHeight"]
item_size = int(len(image_ids_data)/(screen_width * screen_height))
self._unique_integer_keys: Optional[Set[np.uint32]] = None
self._empty_mask: Optional[np.ndarray] = None
if item_size == 4:
self.instance_segmentation_frame_uint32 = read_buffer_image(
image_ids_data, screen_width, screen_height, dtype=np.uint32
).squeeze()
# within ImageSynthesis.cs a RGBA32 frame is read, the alpha channel
# is always equal to 255
self._alpha_channel_value = 255
elif item_size == 3: # 3 byte per pixel for backwards compatibility, RGB24 texture
# this is more expensive than the 4 byte variant since copying is required
frame = read_buffer_image(image_ids_data, screen_width, screen_height)
self.instance_segmentation_frame_uint32 = np.concatenate(
(
frame,
np.zeros((frame.shape[0], frame.shape[1], 1), dtype=np.uint8),
),
axis=2,
)
self.instance_segmentation_frame_uint32.dtype = np.uint32
self.instance_segmentation_frame_uint32 = self.instance_segmentation_frame_uint32.squeeze()
self._alpha_channel_value = 0
# At this point we should have a 2d matrix of shape (height, width)
# with a 32bit uint as the value
self.instance_colors: Dict[str, List[int]]= {}
self.class_colors: Dict[str, List[List[int]]] = {}
for c in metadata["colors"]:
cls = c["name"]
if "|" in c["name"]:
self.instance_colors[c["name"]] = c["color"]
cls = c["name"].split("|")[0]
if cls not in self.class_colors:
self.class_colors[cls] = []
self.class_colors[cls].append(c["color"])
@property
def empty_mask(self) -> np.ndarray:
if self._empty_mask is None:
self._empty_mask = np.zeros(self.instance_segmentation_frame_uint32.shape, dtype=bool)
self._empty_mask.flags["WRITEABLE"] = False
return self._empty_mask
@property
def unique_integer_keys(self) -> Set[np.uint32]:
if self._unique_integer_keys is None:
self._unique_integer_keys = set(np.unique(self.instance_segmentation_frame_uint32))
return self._unique_integer_keys
def _integer_color_key(self, color: List[int]) -> np.uint32:
a = np.array(color + [self._alpha_channel_value], dtype=np.uint8)
# mypy complains, but it is safe to modify the dtype on an ndarray
a.dtype = np.uint32 # type: ignore
return a[0]
def _load_all(self):
if not self._loaded:
all_integer_keys = self.unique_integer_keys
for color_name, color in self.instance_colors.items():
if self._integer_color_key(color) in all_integer_keys:
self.__getitem__(color_name)
self._loaded = True
def mask(self, key: str, default: Optional[np.ndarray]=None) -> Optional[np.ndarray]:
if key not in self.instance_colors:
return default
elif key in self._masks:
return self._masks[key]
m = self.instance_segmentation_frame_uint32 == self._integer_color_key(
self.instance_colors[key]
)
if m.any():
self._masks[key] = m
return m
else:
return default
class LazyClassSegmentationMasks(LazyMask):
def __init__(self, instance_masks: LazyInstanceSegmentationMasks):
self.instance_masks = instance_masks
self._loaded = False
self._masks: Dict[str, np.ndarray] = {}
def _load_all(self):
if not self._loaded:
all_integer_keys = self.instance_masks.unique_integer_keys
for cls, colors in self.instance_masks.class_colors.items():
for color in colors:
if self.instance_masks._integer_color_key(color) in all_integer_keys:
self.__getitem__(cls)
break
self._loaded = True
def mask(self, key: str, default: Optional[np.ndarray]=None) -> Optional[np.ndarray]:
if key in self._masks:
return self._masks[key]
class_mask = np.zeros(self.instance_masks.instance_segmentation_frame_uint32.shape, dtype=bool)
if key == "background":
# "background" is a special name for any color that wasn't included in the metadata
# this is mainly done for backwards compatibility since we only have a handful of instances
# of this across all scenes (e.g. FloorPlan412 - thin strip above the doorway)
all_integer_keys = self.instance_masks.unique_integer_keys
metadata_color_keys = set()
for cls, colors in self.instance_masks.class_colors.items():
for color in colors:
metadata_color_keys.add(self.instance_masks._integer_color_key(color))
background_keys = all_integer_keys - metadata_color_keys
for ik in background_keys:
mask = self.instance_masks.instance_segmentation_frame_uint32 == ik
class_mask = np.logical_or(class_mask, mask)
elif "|" not in key:
for color in self.instance_masks.class_colors.get(key, []):
mask = self.instance_masks.instance_segmentation_frame_uint32 == self.instance_masks._integer_color_key(color)
class_mask = np.logical_or(class_mask, mask)
if class_mask.any():
self._masks[key] = class_mask
return class_mask
else:
return default
class LazyDetections2D(Mapping):
def __init__(self, instance_masks: LazyInstanceSegmentationMasks):
self.instance_masks = instance_masks
def mask_bounding_box(self, mask: np.ndarray) -> Optional[Tuple[int, int, int, int]]:
rows = np.any(mask, axis=1)
cols = np.any(mask, axis=0)
rw = np.where(rows)
if len(rw[0]) == 0:
return None
rmin, rmax = map(int, rw[0][[0, -1]])
cmin, cmax = map(int, np.where(cols)[0][[0, -1]])
return cmin, rmin, cmax, rmax
def __contains__(self, key: object) -> bool:
return key in self.instance_masks
def __eq__(self, other: object):
if isinstance(other, self.__class__):
return self.instance_masks == other.instance_masks
else:
return False
class LazyInstanceDetections2D(LazyDetections2D):
def __init__(self, instance_masks: LazyInstanceSegmentationMasks):
super().__init__(instance_masks)
self._detections2d : Dict[str, Optional[Tuple[int, int, int, int]]] = {}
def __eq__(self, other: object):
if isinstance(other, self.__class__):
return self.instance_masks == other.instance_masks
else:
return False
def mask_bounding_box(self, mask: np.ndarray) -> Optional[Tuple[int, int, int, int]]:
rows = np.any(mask, axis=1)
cols = np.any(mask, axis=0)
rw = np.where(rows)
if len(rw[0]) == 0:
return None
rmin, rmax = map(int, rw[0][[0, -1]])
cmin, cmax = map(int, np.where(cols)[0][[0, -1]])
return cmin, rmin, cmax, rmax
def __contains__(self, key: object) -> bool:
return key in self.instance_masks
def __getitem__(self, key: str) -> Optional[Tuple[int, int, int, int]]:
if key in self._detections2d:
return self._detections2d[key]
mask = self.instance_masks[key]
self._detections2d[key] = self.mask_bounding_box(mask)
return self._detections2d[key]
def __len__(self) -> int:
return len(self.instance_masks)
def __iter__(self):
return iter(self.instance_masks.keys())
class LazyClassDetections2D(LazyDetections2D):
def __init__(self, instance_masks: LazyInstanceSegmentationMasks):
super().__init__(instance_masks)
self._loaded = False
self._detections2d : Dict[str, Optional[Tuple[Tuple[int, int, int, int], ...]]] = {}
def __eq__(self, other: object):
if isinstance(other, self.__class__):
return self.instance_masks == other.instance_masks
else:
return False
def __len__(self) -> int:
self._load_all()
return len(self._detections2d)
def _load_all(self):
if not self._loaded:
all_integer_keys = self.instance_masks.unique_integer_keys
for cls, colors in self.instance_masks.class_colors.items():
for color in colors:
if self.instance_masks._integer_color_key(color) in all_integer_keys:
self.__getitem__(cls)
break
self._loaded = True
def __iter__(self):
self._load_all()
return iter(self._detections2d)
def __getitem__(self, cls: str) -> Optional[Tuple[Tuple[int, int, int, int], ...]]:
if cls in self._detections2d:
return self._detections2d[cls]
detections = []
for color in self.instance_masks.class_colors.get(cls, []):
mask = self.instance_masks.instance_segmentation_frame_uint32 == self.instance_masks._integer_color_key(color)
bb = self.mask_bounding_box(mask)
if bb:
detections.append(bb)
if detections:
self._detections2d[cls] = tuple(detections)
else:
raise KeyError(cls)
return self._detections2d[cls]
class MultiAgentEvent(object):
def __init__(self, active_agent_id, events):
self._active_event = events[active_agent_id]
self.metadata = self._active_event.metadata
self.screen_width = self._active_event.screen_width
self.screen_height = self._active_event.screen_height
self.events = events
self.third_party_camera_frames = []
# XXX add methods for depth,sem_seg
def __bool__(self):
return bool(self._active_event)
@property
def cv2img(self):
return self._active_event.cv2img
def add_third_party_camera_image(self, third_party_image_data):
self.third_party_camera_frames.append(
read_buffer_image(
third_party_image_data, self.screen_width, self.screen_height
)
)
def read_buffer_image(
buf, width, height, flip_y=True, flip_x=False, dtype=np.uint8, flip_rb_colors=False
):
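    """Decode a raw image buffer into a (height, width, channels) numpy array, optionally flipping axes/channels."""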
im_bytes = np.frombuffer(buf, dtype=dtype)
im = im_bytes.reshape(height, width, -1)
if flip_y:
im = np.flip(im, axis=0)
if flip_x:
im = np.flip(im, axis=1)
if flip_rb_colors:
im = im[..., ::-1]
return im
def unique_rows(arr, return_index=False, return_inverse=False):
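    """Return the unique rows of a 2D array, optionally with the selecting indices and/or the inverse mapping."""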
arr = np.ascontiguousarray(arr).copy()
b = arr.view(np.dtype((np.void, arr.dtype.itemsize * arr.shape[1])))
if return_inverse:
_, idx, inv = np.unique(b, return_index=True, return_inverse=True)
else:
_, idx = np.unique(b, return_index=True)
unique = arr[idx]
if return_index and return_inverse:
return unique, idx, inv
elif return_index:
return unique, idx
elif return_inverse:
return unique, inv
else:
return unique
class MetadataWrapper(dict):
def __getitem__(self, x):
# alias deprecated functionality
if x == "reachablePositions":
last_action = super().__getitem__("lastAction")
if last_action == "GetReachablePositions":
warnings.warn(
'The key event.metadata["reachablePositions"] is deprecated and has been remapped to event.metadata["actionReturn"].'
)
x = "actionReturn"
elif last_action == "GetSceneBounds":
# Undocumented GetSceneBounds used to only populate reachablePositions,
# and not actionReturn. This now maintains both sideways and
# backwards compatibility in such a case.
if "reachablePositions" in self:
return super().__getitem__(x)
else:
warnings.warn(
'The key event.metadata["reachablePositions"] is deprecated and has been remapped to event.metadata["actionReturn"].'
)
x = "actionReturn"
else:
raise IndexError(
"You are trying to access event.metadata['reachablePositions'] without first "
+ "calling controller.step(action='GetReachablePositions'). Also, "
+ "the key 'reachablePositions' is deprecated in favor of event.metadata['actionReturn']."
)
elif x == "hand":
if "hand" not in self:
# maintains sideways compatibility
warnings.warn(
'The key event.metadata["hand"] is deprecated and has been remapped to event.metadata["heldObjectPose"].'
)
x = "heldObjectPose"
return super().__getitem__(x)
class Event:
"""
Object that is returned from a call to controller.step().
This class wraps the screenshot that Unity captures as well
as the metadata sent about each object
"""
def __init__(self, metadata):
self.metadata = MetadataWrapper(metadata)
self.screen_width = metadata["screenWidth"]
self.screen_height = metadata["screenHeight"]
self.frame = None
self.depth_frame = None
self.normals_frame = None
self.flow_frame = None
self.color_to_object_id = {}
self.object_id_to_color = {}
self.instance_detections2D = None
self.instance_masks = {}
self.class_masks = {}
self.instance_segmentation_frame = None
self.semantic_segmentation_frame = None
self.class_detections2D = {}
self.process_colors()
self.process_visible_bounds2D()
self.third_party_instance_masks = []
self.third_party_class_masks = []
self.third_party_camera_frames = []
self.third_party_semantic_segmentation_frames = []
self.third_party_instance_segmentation_frames = []
self.third_party_depth_frames = []
self.third_party_normals_frames = []
self.third_party_flows_frames = []
self.events = [self] # Ensure we have a similar API to MultiAgentEvent
def __bool__(self):
return self.metadata["lastActionSuccess"]
def __repr__(self):
"""Summarizes the results from an Event."""
action_return = str(self.metadata["actionReturn"])
if len(action_return) > 100:
action_return = action_return[:100] + "..."
return (
"<ai2thor.server.Event at "
+ str(hex(id(self)))
+ '\n .metadata["lastAction"] = '
+ str(self.metadata["lastAction"])
+ '\n .metadata["lastActionSuccess"] = '
+ str(self.metadata["lastActionSuccess"])
+ '\n .metadata["errorMessage"] = "'
+ str(self.metadata["errorMessage"]).replace("\n", " ")
+ '\n .metadata["actionReturn"] = '
+ action_return
+ "\n>"
)
def __str__(self):
return self.__repr__()
@property
def image_data(self):
warnings.warn(
"Event.image_data has been removed - RGB data can be retrieved from event.frame and encoded to an image format"
)
return None
@property
def class_segmentation_frame(self):
warnings.warn(
"event.class_segmentation_frame has been renamed to event.semantic_segmentation_frame.",
DeprecationWarning,
)
return self.semantic_segmentation_frame
def process_visible_bounds2D(self):
if self.instance_detections2D and len(self.instance_detections2D) > 0:
for obj in self.metadata["objects"]:
obj["visibleBounds2D"] = (
obj["visible"] and obj["objectId"] in self.instance_detections2D
)
def process_colors(self):
if "colors" in self.metadata and self.metadata["colors"]:
for color_data in self.metadata["colors"]:
name = color_data["name"]
c_key = tuple(color_data["color"])
self.color_to_object_id[c_key] = name
self.object_id_to_color[name] = c_key
def objects_by_type(self, object_type):
return [
obj for obj in self.metadata["objects"] if obj["objectType"] == object_type
]
def process_colors_ids(self, image_ids_data):
self.instance_masks = LazyInstanceSegmentationMasks(image_ids_data, self.metadata)
self.class_masks = LazyClassSegmentationMasks(self.instance_masks)
self.class_detections2D = LazyClassDetections2D(self.instance_masks)
self.instance_detections2D = LazyInstanceDetections2D(self.instance_masks)
def _image_depth(self, image_depth_data, **kwargs):
item_size = int(len(image_depth_data)/(self.screen_width * self.screen_height))
multipliers = {
DepthFormat.Normalized: 1.0,
DepthFormat.Meters: (kwargs["camera_far_plane"] - kwargs["camera_near_plane"]),
DepthFormat.Millimeters: (kwargs["camera_far_plane"] - kwargs["camera_near_plane"]) * 1000.0
}
target_depth_format = kwargs["depth_format"]
# assume Normalized for backwards compatibility
source_depth_format = DepthFormat[self.metadata.get("depthFormat", "Normalized")]
multiplier = multipliers[target_depth_format]/multipliers[source_depth_format]
if item_size == 4: # float32
image_depth_out = read_buffer_image(
image_depth_data, self.screen_width, self.screen_height, dtype=np.float32
).squeeze()
elif item_size == 3: # 3 byte 1/256.0 precision, legacy depth binary format
image_depth = read_buffer_image(
image_depth_data, self.screen_width, self.screen_height
)
image_depth_out = (
image_depth[:, :, 0]
+ image_depth[:, :, 1] / np.float32(256)
+ image_depth[:, :, 2] / np.float32(256 ** 2)
)
multiplier /= 256.0
else:
raise Exception("invalid shape for depth image %s" % (image_depth.shape,))
if multiplier != 1.0:
if not image_depth_out.flags["WRITEABLE"]:
image_depth_out = np.copy(image_depth_out)
image_depth_out *= multiplier
if "add_noise" in kwargs and kwargs["add_noise"]:
image_depth_out = apply_real_noise(
image_depth_out, self.screen_width, indices=kwargs["noise_indices"]
)
return image_depth_out
def add_third_party_camera_image_robot(self, third_party_image_data, width, height):
self.third_party_camera_frames.append(
read_buffer_image(third_party_image_data, width, height)
)
def add_third_party_image_depth_robot(
self, image_depth_data, depth_format, **kwargs
):
multiplier = 1.0
camera_far_plane = kwargs.pop("camera_far_plane", 1)
camera_near_plane = kwargs.pop("camera_near_plane", 0)
depth_width = kwargs.pop("depth_width", self.screen_width)
depth_height = kwargs.pop("depth_height", self.screen_height)
if depth_format == DepthFormat.Normalized:
multiplier = 1.0 / (camera_far_plane - camera_near_plane)
elif depth_format == DepthFormat.Millimeters:
multiplier = 1000.0
image_depth = (
read_buffer_image(
image_depth_data, depth_width, depth_height, **kwargs
).reshape(depth_height, depth_width)
* multiplier
)
self.third_party_depth_frames.append(image_depth.astype(np.float32))
def add_image_depth_robot(self, image_depth_data, depth_format, **kwargs):
multiplier = 1.0
camera_far_plane = kwargs.pop("camera_far_plane", 1)
camera_near_plane = kwargs.pop("camera_near_plane", 0)
depth_width = kwargs.pop("depth_width", self.screen_width)
depth_height = kwargs.pop("depth_height", self.screen_height)
if depth_format == DepthFormat.Normalized:
multiplier = 1.0 / (camera_far_plane - camera_near_plane)
elif depth_format == DepthFormat.Millimeters:
multiplier = 1000.0
image_depth = (
read_buffer_image(
image_depth_data, depth_width, depth_height, **kwargs
).reshape(depth_height, depth_width)
* multiplier
)
self.depth_frame = image_depth.astype(np.float32)
def add_image_depth(self, image_depth_data, **kwargs):
self.depth_frame = self._image_depth(image_depth_data, **kwargs)
def add_third_party_image_depth(self, image_depth_data, **kwargs):
self.third_party_depth_frames.append(
self._image_depth(image_depth_data, **kwargs)
)
def add_third_party_image_normals(self, normals_data):
self.third_party_normals_frames.append(
read_buffer_image(normals_data, self.screen_width, self.screen_height)
)
def add_image_normals(self, image_normals_data):
self.normals_frame = read_buffer_image(
image_normals_data, self.screen_width, self.screen_height
)
def add_third_party_image_flows(self, flows_data):
self.third_party_flows_frames.append(
read_buffer_image(flows_data, self.screen_width, self.screen_height)
)
def add_image_flows(self, image_flows_data):
        self.flow_frame = read_buffer_image(
image_flows_data, self.screen_width, self.screen_height
)
def add_third_party_camera_image(self, third_party_image_data):
self.third_party_camera_frames.append(
read_buffer_image(
third_party_image_data, self.screen_width, self.screen_height
)
)
def add_image(self, image_data, **kwargs):
self.frame = read_buffer_image(
image_data, self.screen_width, self.screen_height, **kwargs
)[
:, :, :3
] # CloudRendering returns 4 channels instead of 3
def add_image_ids(self, image_ids_data):
self.instance_segmentation_frame = read_buffer_image(
image_ids_data, self.screen_width, self.screen_height
)[:, :, :3]
self.process_colors_ids(image_ids_data)
def add_third_party_image_ids(self, image_ids_data):
instance_segmentation_frame = read_buffer_image(
image_ids_data, self.screen_width, self.screen_height
)[:, :, :3]
self.third_party_instance_segmentation_frames.append(
instance_segmentation_frame
)
instance_masks = LazyInstanceSegmentationMasks(image_ids_data, self.metadata)
self.third_party_instance_masks.append(instance_masks)
self.third_party_class_masks.append(LazyClassSegmentationMasks(instance_masks))
def add_image_classes(self, image_classes_data):
self.semantic_segmentation_frame = read_buffer_image(
image_classes_data, self.screen_width, self.screen_height
)[:, :, :3]
def add_third_party_image_classes(self, image_classes_data):
self.third_party_semantic_segmentation_frames.append(
read_buffer_image(image_classes_data, self.screen_width, self.screen_height)[:, :, :3]
)
def cv2image(self):
warnings.warn("Deprecated - please use event.cv2img")
return self.cv2img
@property
def cv2img(self):
return self.frame[..., ::-1]
@property
def pose(self):
agent_meta = self.metadata["agent"]
loc = agent_meta["position"]
rotation = round(agent_meta["rotation"]["y"] * 1000)
horizon = round(agent_meta["cameraHorizon"] * 1000)
return (round(loc["x"] * 1000), round(loc["z"] * 1000), rotation, horizon)
@property
def pose_discrete(self):
# XXX should have this as a parameter
step_size = 0.25
agent_meta = self.metadata["agent"]
loc = agent_meta["position"]
rotation = int(agent_meta["rotation"]["y"] / 90.0)
horizon = int(round(agent_meta["cameraHorizon"]))
return (int(loc["x"] / step_size), int(loc["z"] / step_size), rotation, horizon)
def get_object(self, object_id):
for obj in self.metadata["objects"]:
if obj["objectId"] == object_id:
return obj
return None
class DepthFormat(Enum):
    Meters = 0
    Normalized = 1
    Millimeters = 2
class Server(ABC):
def __init__(
self,
width,
height,
timeout: Optional[float],
depth_format=DepthFormat.Meters,
add_depth_noise=False,
):
self.depth_format = depth_format
self.add_depth_noise = add_depth_noise
self.timeout = timeout
self.noise_indices = None
self.camera_near_plane = 0.1
self.camera_far_plane = 20.0
self.sequence_id = 0
self.started = False
self.client_token = None
self.unity_proc: Optional[subprocess.Popen] = None
if add_depth_noise:
assert width == height, "Noise supported with square dimension images only."
self.noise_indices = generate_noise_indices(width)
def set_init_params(self, init_params):
self.camera_near_plane = init_params["cameraNearPlane"]
self.camera_far_plane = init_params["cameraFarPlane"]
def create_event(self, metadata, files):
if metadata["sequenceId"] != self.sequence_id:
raise ValueError(
"Sequence id mismatch: %s vs %s"
% (metadata["sequenceId"], self.sequence_id)
)
events = []
for i, a in enumerate(metadata["agents"]):
e = Event(a)
image_mapping = dict(
image=e.add_image,
image_depth=lambda x: e.add_image_depth(
x,
depth_format=self.depth_format,
camera_near_plane=self.camera_near_plane,
camera_far_plane=self.camera_far_plane,
add_noise=self.add_depth_noise,
noise_indices=self.noise_indices,
),
image_ids=e.add_image_ids,
image_classes=e.add_image_classes,
image_normals=e.add_image_normals,
image_flow=e.add_image_flows,
)
for key in image_mapping.keys():
if key in files:
image_mapping[key](files[key][i])
third_party_image_mapping = {
# if we want to convert this param to underscores in Unity, we will need to
# keep the mapping with the dash for backwards compatibility with older
# Unity builds
"image-thirdParty-camera": e.add_third_party_camera_image,
"image_thirdParty_depth": lambda x: e.add_third_party_image_depth(
x,
depth_format=self.depth_format,
camera_near_plane=self.camera_near_plane,
camera_far_plane=self.camera_far_plane,
),
"image_thirdParty_image_ids": e.add_third_party_image_ids,
"image_thirdParty_classes": e.add_third_party_image_classes,
"image_thirdParty_normals": e.add_third_party_image_normals,
"image_thirdParty_flows": e.add_third_party_image_flows,
}
if a["thirdPartyCameras"] is not None:
for ti, t in enumerate(a["thirdPartyCameras"]):
for key in third_party_image_mapping.keys():
if key in files:
third_party_image_mapping[key](files[key][ti])
events.append(e)
if len(events) > 1:
self.last_event = MultiAgentEvent(metadata["activeAgentId"], events)
else:
self.last_event = events[0]
return self.last_event
@abstractmethod
def start(self):
raise NotImplementedError
@abstractmethod
def stop(self):
raise NotImplementedError
@abstractmethod
def send(self, action):
raise NotImplementedError
@abstractmethod
def receive(self, timeout: Optional[float] = None):
raise NotImplementedError
| ai2thor-main | ai2thor/server.py |
from flask import Flask, request, make_response
import numpy as np
import random
import msgpack
import werkzeug
import werkzeug.serving
import werkzeug.http
def random_image(height, width):
img = np.zeros(height * width * 3, dtype=np.uint8).reshape(height, width, 3)
img[:, :, 0] = random.randint(0, 255)
img[:, :, 1] = random.randint(0, 255)
img[:, :, 2] = random.randint(0, 255)
return img
class MockServer(object):
def __init__(self, height, width):
app = Flask(__name__)
self.height = height
self.width = width
@app.route("/ping", methods=["GET"])
def ping():
return make_response("PONG")
@app.route("/start", methods=["POST"])
def start():
return make_response(msgpack.packb({"status": 200}, use_bin_type=True))
@app.route("/reset", methods=["POST"])
def reset():
return make_response(msgpack.packb({"status": 200}, use_bin_type=True))
@app.route("/step", methods=["POST"])
def step():
content = request.json
metadata = {
u"sequenceId": content["sequenceId"],
u"agents": [
{
u"agentId": 0,
u"screenHeight": self.height,
u"screenWidth": self.width,
u"lastAction": content["action"],
u"lastActionSuccess": True,
}
],
}
result = {
u"image": [random_image(self.height, self.width).tostring()],
u"image_depth": [],
u"metadata": metadata,
}
out = msgpack.packb(result, use_bin_type=True)
return make_response(out)
self.host = "127.0.0.1"
self.port = 9200
app.config.update(PROPAGATE_EXCEPTIONS=True, JSONIFY_PRETTYPRINT_REGULAR=False)
self.wsgi_server = werkzeug.serving.make_server(self.host, self.port, app)
def start(self):
self.wsgi_server.serve_forever()
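# Usage sketch (assumption: this mock stands in for the HTTP endpoint that a robot-style
# client such as ai2thor.robot_controller.Controller would talk to; running it on a
# background thread is illustrative, not part of this module):
#
#   import threading
#
#   server = MockServer(height=300, width=300)
#   threading.Thread(target=server.start, daemon=True).start()
#   # clients can now POST JSON actions to http://127.0.0.1:9200/step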
| ai2thor-main | ai2thor/mock_real_server.py |
# Copyright Allen Institute for Artificial Intelligence 2017
"""
ai2thor.controller
Primary entrypoint into the Thor API. Provides all the high-level functions
needed to control the in-game agent through ai2thor.server.
"""
import atexit
import copy
import json
import logging
import math
import numbers
import os
import random
import re
import shlex
import shutil
import subprocess
import time
import traceback
import uuid
import warnings
from collections import defaultdict, deque
from functools import lru_cache
from itertools import product
from platform import architecture as platform_architecture
from platform import system as platform_system
from typing import Dict, Any, Union, Optional
import numpy as np
import ai2thor.build
import ai2thor.fifo_server
import ai2thor.platform
import ai2thor.wsgi_server
from ai2thor._quality_settings import DEFAULT_QUALITY, QUALITY_SETTINGS
from ai2thor.exceptions import RestartError, UnityCrashException
from ai2thor.interact import DefaultActions, InteractiveControllerPrompt
from ai2thor.server import DepthFormat
from ai2thor.util import atomic_write, makedirs
from ai2thor.util.lock import LockEx
logger = logging.getLogger(__name__)
RECEPTACLE_OBJECTS = {
"Box": {
"Candle",
"CellPhone",
"Cloth",
"CreditCard",
"Dirt",
"KeyChain",
"Newspaper",
"ScrubBrush",
"SoapBar",
"SoapBottle",
"ToiletPaper",
},
"Cabinet": {
"Bowl",
"BowlDirty",
"Box",
"Bread",
"BreadSliced",
"ButterKnife",
"Candle",
"CellPhone",
"Cloth",
"CoffeeMachine",
"Container",
"ContainerFull",
"CreditCard",
"Cup",
"Fork",
"KeyChain",
"Knife",
"Laptop",
"Mug",
"Newspaper",
"Pan",
"Plate",
"Plunger",
"Pot",
"Potato",
"Sandwich",
"ScrubBrush",
"SoapBar",
"SoapBottle",
"Spoon",
"SprayBottle",
"Statue",
"TissueBox",
"Toaster",
"ToiletPaper",
"WateringCan",
},
"CoffeeMachine": {"MugFilled", "Mug"},
"CounterTop": {
"Apple",
"AppleSlice",
"Bowl",
"BowlDirty",
"BowlFilled",
"Box",
"Bread",
"BreadSliced",
"ButterKnife",
"Candle",
"CellPhone",
"CoffeeMachine",
"Container",
"ContainerFull",
"CreditCard",
"Cup",
"Egg",
"EggFried",
"EggShell",
"Fork",
"HousePlant",
"KeyChain",
"Knife",
"Laptop",
"Lettuce",
"LettuceSliced",
"Microwave",
"Mug",
"MugFilled",
"Newspaper",
"Omelette",
"Pan",
"Plate",
"Plunger",
"Pot",
"Potato",
"PotatoSliced",
"RemoteControl",
"Sandwich",
"ScrubBrush",
"SoapBar",
"SoapBottle",
"Spoon",
"SprayBottle",
"Statue",
"Television",
"TissueBox",
"Toaster",
"ToiletPaper",
"Tomato",
"TomatoSliced",
"WateringCan",
},
"Fridge": {
"Apple",
"AppleSlice",
"Bowl",
"BowlDirty",
"BowlFilled",
"Bread",
"BreadSliced",
"Container",
"ContainerFull",
"Cup",
"Egg",
"EggFried",
"EggShell",
"Lettuce",
"LettuceSliced",
"Mug",
"MugFilled",
"Omelette",
"Pan",
"Plate",
"Pot",
"Potato",
"PotatoSliced",
"Sandwich",
"Tomato",
"TomatoSliced",
},
"GarbageCan": {
"Apple",
"AppleSlice",
"Box",
"Bread",
"BreadSliced",
"Candle",
"CellPhone",
"CreditCard",
"Egg",
"EggFried",
"EggShell",
"LettuceSliced",
"Newspaper",
"Omelette",
"Plunger",
"Potato",
"PotatoSliced",
"Sandwich",
"ScrubBrush",
"SoapBar",
"SoapBottle",
"SprayBottle",
"Statue",
"ToiletPaper",
"Tomato",
"TomatoSliced",
},
"Microwave": {
"Bowl",
"BowlDirty",
"BowlFilled",
"Bread",
"BreadSliced",
"Container",
"ContainerFull",
"Cup",
"Egg",
"EggFried",
"Mug",
"MugFilled",
"Omelette",
"Plate",
"Potato",
"PotatoSliced",
"Sandwich",
},
"PaintingHanger": {"Painting"},
"Pan": {
"Apple",
"AppleSlice",
"EggFried",
"Lettuce",
"LettuceSliced",
"Omelette",
"Potato",
"PotatoSliced",
"Tomato",
"TomatoSliced",
},
"Pot": {
"Apple",
"AppleSlice",
"EggFried",
"Lettuce",
"LettuceSliced",
"Omelette",
"Potato",
"PotatoSliced",
"Tomato",
"TomatoSliced",
},
"Sink": {
"Apple",
"AppleSlice",
"Bowl",
"BowlDirty",
"BowlFilled",
"ButterKnife",
"Container",
"ContainerFull",
"Cup",
"Egg",
"EggFried",
"EggShell",
"Fork",
"Knife",
"Lettuce",
"LettuceSliced",
"Mug",
"MugFilled",
"Omelette",
"Pan",
"Plate",
"Pot",
"Potato",
"PotatoSliced",
"Sandwich",
"ScrubBrush",
"SoapBottle",
"Spoon",
"Tomato",
"TomatoSliced",
"WateringCan",
},
"StoveBurner": {"Omelette", "Pot", "Pan", "EggFried"},
"TableTop": {
"Apple",
"AppleSlice",
"Bowl",
"BowlDirty",
"BowlFilled",
"Box",
"Bread",
"BreadSliced",
"ButterKnife",
"Candle",
"CellPhone",
"CoffeeMachine",
"Container",
"ContainerFull",
"CreditCard",
"Cup",
"Egg",
"EggFried",
"EggShell",
"Fork",
"HousePlant",
"KeyChain",
"Knife",
"Laptop",
"Lettuce",
"LettuceSliced",
"Microwave",
"Mug",
"MugFilled",
"Newspaper",
"Omelette",
"Pan",
"Plate",
"Plunger",
"Pot",
"Potato",
"PotatoSliced",
"RemoteControl",
"Sandwich",
"ScrubBrush",
"SoapBar",
"SoapBottle",
"Spoon",
"SprayBottle",
"Statue",
"Television",
"TissueBox",
"Toaster",
"ToiletPaper",
"Tomato",
"TomatoSliced",
"WateringCan",
},
"ToiletPaperHanger": {"ToiletPaper"},
"TowelHolder": {"Cloth"},
}
def process_alive(pid):
"""
Use kill(0) to determine if pid is alive
:param pid: process id
:rtype: bool
"""
try:
os.kill(pid, 0)
except OSError:
return False
return True
def distance(point1, point2):
x_diff = (point1["x"] - point2["x"]) ** 2
z_diff = (point1["z"] - point2["z"]) ** 2
return math.sqrt(x_diff + z_diff)
def key_for_point(x, z):
return "%0.1f %0.1f" % (x, z)
class Controller(object):
def __init__(
self,
quality=DEFAULT_QUALITY,
fullscreen=False,
headless=False,
port=0,
start_unity=True,
local_executable_path=None,
local_build=False,
commit_id=ai2thor.build.COMMIT_ID,
branch=None,
width=300,
height=300,
x_display=None,
host="127.0.0.1",
scene=None,
image_dir=".",
save_image_per_frame=False,
depth_format=DepthFormat.Meters,
add_depth_noise=False,
download_only=False,
include_private_scenes=False,
server_class=None,
server_type=None,
gpu_device=None,
platform=None,
server_timeout: Optional[float] = 100.0,
server_start_timeout: float = 300.0,
**unity_initialization_parameters,
):
self.receptacle_nearest_pivot_points = {}
self.server = None
self.unity_pid = None
self.container_id = None
self.width = width
self.height = height
self.server_timeout = server_timeout
self.server_start_timeout = server_start_timeout
assert self.server_timeout is None or 0 < self.server_timeout
assert 0 < self.server_start_timeout
self.last_event = None
self.scene = None
self._scenes_in_build = None
self.killing_unity = False
self.quality = quality
self.lock_file = None
self.fullscreen = fullscreen
self.headless = headless
self.depth_format = depth_format
self.add_depth_noise = add_depth_noise
self.include_private_scenes = include_private_scenes
self.x_display = None
self.gpu_device = gpu_device
cuda_visible_devices = list(
map(
int,
filter(
lambda y: y.isdigit(),
map(
lambda x: x.strip(),
os.environ.get("CUDA_VISIBLE_DEVICES", "").split(","),
),
),
)
)
if self.gpu_device is not None:
# numbers.Integral works for numpy.int32/64 and Python int
if not isinstance(self.gpu_device, numbers.Integral) or self.gpu_device < 0:
raise ValueError(
"Invalid gpu_device: '%s'. gpu_device must be >= 0"
% self.gpu_device
)
elif cuda_visible_devices:
if self.gpu_device >= len(cuda_visible_devices):
raise ValueError(
"Invalid gpu_device: '%s'. gpu_device must less than number of CUDA_VISIBLE_DEVICES: %s"
% (self.gpu_device, cuda_visible_devices)
)
else:
self.gpu_device = cuda_visible_devices[self.gpu_device]
elif cuda_visible_devices:
self.gpu_device = cuda_visible_devices[0]
if x_display:
self.x_display = x_display
elif "DISPLAY" in os.environ:
self.x_display = os.environ["DISPLAY"]
if self.x_display and ":" not in self.x_display:
self.x_display = ":" + self.x_display
if quality not in QUALITY_SETTINGS:
valid_qualities = [
q
for q, v in sorted(QUALITY_SETTINGS.items(), key=lambda qv: qv[1])
if v > 0
]
raise ValueError(
"Quality {} is invalid, please select from one of the following settings: ".format(
quality
)
+ ", ".join(valid_qualities)
)
elif QUALITY_SETTINGS[quality] == 0:
raise ValueError(
"Quality {} is associated with an index of 0. "
"Due to a bug in unity, this quality setting would be ignored.".format(
quality
)
)
        if server_class is None and server_type is not None:
            server_class = Controller.server_type_to_class(server_type)
        if (
            server_class == ai2thor.fifo_server.FifoServer
            and platform_system() == "Windows"
        ):
            raise ValueError("server_class=FifoServer cannot be used on Windows.")
        elif server_class is None:
            # FIFO pipes are unavailable on Windows, so default to the WSGI server there
            self.server_class = (
                ai2thor.wsgi_server.WsgiServer
                if platform_system() == "Windows"
                else ai2thor.fifo_server.FifoServer
            )
        else:
            self.server_class = server_class
self._build = None
self.interactive_controller = InteractiveControllerPrompt(
list(DefaultActions),
has_object_actions=True,
image_dir=image_dir,
image_per_frame=save_image_per_frame,
)
if not start_unity:
self._build = ai2thor.build.EditorBuild()
elif local_executable_path:
self._build = ai2thor.build.ExternalBuild(local_executable_path)
else:
self._build = self.find_build(local_build, commit_id, branch, platform)
self._build.download()
if not download_only:
self.start(
port=port,
start_unity=start_unity,
width=width,
height=height,
x_display=x_display,
host=host,
)
self.initialization_parameters = unity_initialization_parameters
if "continuous" in self.initialization_parameters:
warnings.warn(
"Warning: 'continuous' is deprecated and will be ignored,"
" use 'snapToGrid={}' instead.".format(
not self.initialization_parameters["continuous"]
),
DeprecationWarning,
)
if (
"fastActionEmit" in self.initialization_parameters
and self.server_class != ai2thor.fifo_server.FifoServer
):
warnings.warn("fastAtionEmit is only available with the FifoServer")
if "continuousMode" in self.initialization_parameters:
warnings.warn(
"Warning: 'continuousMode' is deprecated and will be ignored,"
" use 'snapToGrid={}' instead.".format(
not self.initialization_parameters["continuousMode"]
),
DeprecationWarning,
)
if "agentControllerType" in self.initialization_parameters:
raise ValueError(
"`agentControllerType` is no longer an allowed initialization parameter."
" Use `agentMode` instead."
)
# Let's set the scene for them!
if scene is None:
scenes_in_build = self.scenes_in_build
if not scenes_in_build:
raise RuntimeError("No scenes are in your build of AI2-THOR!")
# use a robothor scene
robothor_scenes = set(self.robothor_scenes())
# prioritize robothor if locobot is being used
robothor_scenes_in_build = robothor_scenes.intersection(scenes_in_build)
# check for bot as well, for backwards compatibility support
if (
unity_initialization_parameters.get("agentMode", "default").lower()
in {"locobot", "bot"}
and robothor_scenes_in_build
):
# get the first robothor scene
scene = sorted(list(robothor_scenes_in_build))[0]
else:
ithor_scenes = set(self.ithor_scenes())
ithor_scenes_in_build = ithor_scenes.intersection(scenes_in_build)
if ithor_scenes_in_build:
# prioritize iTHOR because that's what the default agent best uses
scene = sorted(list(ithor_scenes_in_build))[0]
else:
# perhaps only using RoboTHOR or using only custom scenes
scene = sorted(list(scenes_in_build))[0]
event = self.reset(scene)
# older builds don't send actionReturn on Initialize
init_return = event.metadata["actionReturn"]
if init_return:
self.server.set_init_params(init_return)
logging.info(f"Initialize return: {init_return}")
def _build_server(self, host, port, width, height):
if self.server is not None:
return
if self.server_class.server_type not in self._build.server_types:
warnings.warn(
"server_type: %s not available in build: %s, defaulting to WSGI"
% (self.server_class.server_type, self._build.url)
)
self.server_class = ai2thor.wsgi_server.WsgiServer
if self.server_class == ai2thor.wsgi_server.WsgiServer:
self.server = ai2thor.wsgi_server.WsgiServer(
host=host,
timeout=self.server_timeout,
port=port,
width=width,
height=height,
depth_format=self.depth_format,
add_depth_noise=self.add_depth_noise,
)
elif self.server_class == ai2thor.fifo_server.FifoServer:
            # Not supported on Windows
if os.name == 'nt':
raise ValueError("FIFO server not supported on Windows platform.")
self.server = ai2thor.fifo_server.FifoServer(
width=width,
height=height,
timeout=self.server_timeout,
depth_format=self.depth_format,
add_depth_noise=self.add_depth_noise,
)
def __enter__(self):
return self
def __exit__(self, *args):
self.stop()
@property
def scenes_in_build(self):
if self._scenes_in_build is not None:
return self._scenes_in_build
try:
event = self.step(action="GetScenesInBuild")
self._scenes_in_build = set(event.metadata["actionReturn"])
except ValueError as e:
# will happen for old builds without GetScenesInBuild
self._scenes_in_build = set()
return self._scenes_in_build
@staticmethod
def normalize_scene(scene):
if re.match(r"^FloorPlan[0-9]+$", scene):
scene = scene + "_physics"
return scene
def reset(self, scene=None, **init_params):
if scene is None:
scene = self.scene
is_procedural = isinstance(scene, dict)
if is_procedural:
# ProcTHOR scene
self.server.send(dict(action="Reset", sceneName="Procedural", sequenceId=0))
self.last_event = self.server.receive()
else:
scene = Controller.normalize_scene(scene)
# scenes in build can be an empty set when GetScenesInBuild doesn't exist as an action
# for old builds
if self.scenes_in_build and scene not in self.scenes_in_build:
raise ValueError(
"\nScene '{}' not contained in build (scene names are case sensitive)."
"\nPlease choose one of the following scene names:\n\n{}".format(
scene,
", ".join(sorted(list(self.scenes_in_build))),
)
)
self.server.send(dict(action="Reset", sceneName=scene, sequenceId=0))
self.last_event = self.server.receive()
if (
scene in self.robothor_scenes()
and self.initialization_parameters.get("agentMode", "default").lower()
!= "locobot"
):
warnings.warn(
"You are using a RoboTHOR scene without using the standard LoCoBot.\n"
+ "Did you mean to mean to set agentMode='locobot' upon initialization or within controller.reset(...)?"
)
# update the initialization parameters
init_params = init_params.copy()
target_width = init_params.pop("width", self.width)
target_height = init_params.pop("height", self.height)
        # width and height are updated via 'ChangeResolution', not 'Initialize'.
        # With CloudRendering the command-line height/width aren't respected, so
        # we compare the desired height/width against the current values and
        # update the resolution if they differ.
        # If Python is running against the Unity Editor, ChangeResolution has
        # no effect, so it gets skipped.
if (self.server.unity_proc is not None) and (
target_width != self.last_event.screen_width
or target_height != self.last_event.screen_height
):
self.step(
action="ChangeResolution",
x=target_width,
y=target_height,
raise_for_failure=True,
)
self.width = target_width
self.height = target_height
# the command line -quality parameter is not respected with the CloudRendering
# engine, so the quality is manually changed after launch
if self._build.platform == ai2thor.platform.CloudRendering:
self.step(action="ChangeQuality", quality=self.quality)
# updates the initialization parameters
self.initialization_parameters.update(init_params)
# RoboTHOR checks
agent_mode = self.initialization_parameters.get("agentMode", "default")
if agent_mode.lower() == "bot":
self.initialization_parameters["agentMode"] = "locobot"
warnings.warn(
"On reset and upon initialization, agentMode='bot' has been renamed to agentMode='locobot'."
)
self.last_event = self.step(
action="Initialize",
raise_for_failure=True,
**self.initialization_parameters,
)
if is_procedural:
self.last_event = self.step(action="CreateHouse", house=scene)
self.scene = scene
return self.last_event
@classmethod
def server_type_to_class(cls, server_type):
if server_type == ai2thor.fifo_server.FifoServer.server_type:
return ai2thor.fifo_server.FifoServer
elif server_type == ai2thor.wsgi_server.WsgiServer.server_type:
return ai2thor.wsgi_server.WsgiServer
else:
            valid_servers = ", ".join(
                f"'{x}'" for x in (ai2thor.fifo_server.FifoServer.server_type, ai2thor.wsgi_server.WsgiServer.server_type)
            )
            raise ValueError(f"Invalid server type '{server_type}'. Valid values are: {valid_servers}.")
def random_initialize(
self,
random_seed=None,
randomize_open=False,
unique_object_types=False,
exclude_receptacle_object_pairs=[],
max_num_repeats=1,
remove_prob=0.5,
):
raise Exception(
"RandomInitialize has been removed. Use InitialRandomSpawn - https://ai2thor.allenai.org/ithor/documentation/actions/initialization/#object-position-randomization"
)
@lru_cache()
def ithor_scenes(
self,
include_kitchens=True,
include_living_rooms=True,
include_bedrooms=True,
include_bathrooms=True,
):
types = []
if include_kitchens:
types.append((1, 31))
if include_living_rooms:
types.append((201, 231))
if include_bedrooms:
types.append((301, 331))
if include_bathrooms:
types.append((401, 431))
# keep this as a list because the order may look weird otherwise
scenes = []
for low, high in types:
for i in range(low, high):
scenes.append("FloorPlan%s_physics" % i)
return scenes
@lru_cache()
def robothor_scenes(self, include_train=True, include_val=True):
# keep this as a list because the order may look weird otherwise
scenes = []
stages = dict()
# from FloorPlan_Train[1:12]_[1:5]
if include_train:
stages["Train"] = range(1, 13)
if include_val:
            # from FloorPlan_Val[1:3]_[1:5]
stages["Val"] = range(1, 4)
for stage, wall_configs in stages.items():
for wall_config_i in wall_configs:
for object_config_i in range(1, 6):
scenes.append(
"FloorPlan_{stage}{wall_config}_{object_config}".format(
stage=stage,
wall_config=wall_config_i,
object_config=object_config_i,
)
)
return scenes
@lru_cache()
def scene_names(self):
return self.ithor_scenes() + self.robothor_scenes()
def _prune_release(self, release):
try:
# we must try to get a lock here since its possible that a process could still
# be running with this release
lock = LockEx(release, blocking=False)
lock.lock()
# its possible that another process could prune
# out a release when running multiple procs
# that all race to prune the same release
if os.path.isdir(release):
tmp_prune_dir = os.path.join(
self.tmp_dir,
"-".join(
[
os.path.basename(release),
str(time.time()),
str(random.random()),
"prune",
]
),
)
os.rename(release, tmp_prune_dir)
shutil.rmtree(tmp_prune_dir)
lock.unlock()
lock.unlink()
return True
except BlockingIOError:
return False
def prune_releases(self):
current_exec_path = self._build.executable_path
rdir = self.releases_dir
makedirs(self.tmp_dir)
makedirs(self.releases_dir)
        # sort by mtime ascending, keeping the 3 most recent, attempt to prune anything older
all_dirs = list(
filter(
os.path.isdir, map(lambda x: os.path.join(rdir, x), os.listdir(rdir))
)
)
dir_stats = defaultdict(lambda: 0)
for d in all_dirs:
try:
dir_stats[d] = os.stat(d).st_mtime
# its possible for multiple procs to race to stat/prune
# creating the possibility that between the listdir/stat the directory was
# pruned
except FileNotFoundError:
pass
sorted_dirs = sorted(all_dirs, key=lambda x: dir_stats[x])[:-3]
for release in sorted_dirs:
if current_exec_path.startswith(release):
continue
self._prune_release(release)
def next_interact_command(self):
# NOTE: Leave this here because it is incompatible with Windows.
from ai2thor.interact import get_term_character
current_buffer = ""
while True:
commands = self._interact_commands
current_buffer += get_term_character()
if current_buffer == "q" or current_buffer == "\x03":
break
if current_buffer in commands:
yield commands[current_buffer]
current_buffer = ""
else:
match = False
for k, v in commands.items():
if k.startswith(current_buffer):
match = True
break
if not match:
current_buffer = ""
def interact(
self,
semantic_segmentation_frame=False,
instance_segmentation_frame=False,
depth_frame=False,
color_frame=False,
metadata=False,
):
self.interactive_controller.interact(
self,
semantic_segmentation_frame,
instance_segmentation_frame,
depth_frame,
color_frame,
metadata,
)
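    # multi_step_physics: runs `action` with physics auto-simulation paused, then keeps
    # advancing the simulation in `timeStep` increments until the scene reports it is at
    # rest, or until `max_steps` is reached (at which point auto-simulation is resumed).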
def multi_step_physics(self, action, timeStep=0.05, max_steps=20):
events = []
self.step(action=dict(action="PausePhysicsAutoSim"), raise_for_failure=True)
events.append(self.step(action))
while not self.last_event.metadata["isSceneAtRest"]:
events.append(
self.step(
action=dict(action="AdvancePhysicsStep", timeStep=timeStep),
raise_for_failure=True,
)
)
if len(events) == (max_steps - 1):
events.append(
self.step(
action=dict(action="UnpausePhysicsAutoSim"),
raise_for_failure=True,
)
)
break
return events
    def step(self, action: Optional[Union[str, Dict[str, Any]]] = None, **action_args):
if isinstance(action, Dict):
action = copy.deepcopy(action) # prevent changes from leaking
else:
action = dict(action=action)
raise_for_failure = action_args.pop("raise_for_failure", False)
action.update(action_args)
if self.headless:
action["renderImage"] = False
# prevent changes to the action from leaking
action = copy.deepcopy(action)
# XXX should be able to get rid of this with some sort of deprecation warning
if "AI2THOR_VISIBILITY_DISTANCE" in os.environ:
action["visibilityDistance"] = float(
os.environ["AI2THOR_VISIBILITY_DISTANCE"]
)
self.last_action = action
# dangerously converts rotation(float) to rotation(dict(x=0, y=float, z=0))
# this should be removed when ServerActions have been removed from Unity
# for all relevant actions.
rotation = action.get("rotation")
if rotation is not None and not isinstance(rotation, dict):
action["rotation"] = dict(y=rotation)
# Support for deprecated parameter names (old: new)
# Note that these parameters used to be applicable to ANY action.
changed_parameter_names = {
"renderClassImage": "renderSemanticSegmentation",
"renderObjectImage": "renderInstanceSegmentation",
}
for old, new in changed_parameter_names.items():
if old in action:
# warnings.warn(old + " has been renamed to " + new)
action[new] = action[old]
# not deleting to allow for older builds to continue to work
# del action[old]
self.server.send(action)
try:
self.last_event = self.server.receive()
except UnityCrashException:
self.server.stop()
self.server = None
# we don't need to pass port or host, since this Exception
# is only thrown from the FifoServer, start_unity is also
# not passed since Unity would have to have been started
# for this to be thrown
message = (
f"Restarting unity due to crash when when running action {action}"
f" in scene {self.last_event.metadata['sceneName']}:\n{traceback.format_exc()}"
)
warnings.warn(message)
self.start(width=self.width, height=self.height, x_display=self.x_display)
self.reset()
raise RestartError(message)
except Exception as e:
self.server.stop()
raise (TimeoutError if isinstance(e, TimeoutError) else RuntimeError)(
f"Error encountered when running action {action}"
f" in scene {self.last_event.metadata['sceneName']}."
)
if not self.last_event.metadata["lastActionSuccess"]:
if self.last_event.metadata["errorCode"] in [
"InvalidAction",
"MissingArguments",
"AmbiguousAction",
"InvalidArgument",
]:
raise ValueError(self.last_event.metadata["errorMessage"])
elif raise_for_failure:
raise RuntimeError(
self.last_event.metadata.get("errorMessage", f"{action} failed")
)
return self.last_event
def unity_command(self, width, height, headless):
fullscreen = 1 if self.fullscreen else 0
command = self._build.executable_path
if headless:
command += " -batchmode -nographics"
else:
command += (
" -screen-fullscreen %s -screen-quality %s -screen-width %s -screen-height %s"
% (fullscreen, QUALITY_SETTINGS[self.quality], width, height)
)
if self.gpu_device is not None:
# This parameter only applies to the CloudRendering platform.
# Vulkan device ids need not correspond to CUDA device ids. The below
# code finds the device_uuids for each GPU and then figures out the mapping
# between the CUDA device ids and the Vulkan device ids.
cuda_vulkan_mapping_path = os.path.join(self.base_dir, "cuda-vulkan-mapping.json")
with LockEx(cuda_vulkan_mapping_path):
if not os.path.exists(cuda_vulkan_mapping_path):
vulkan_result = None
try:
vulkan_result = subprocess.run(
["vulkaninfo"],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
universal_newlines=True
)
except FileNotFoundError:
pass
if vulkan_result is None or vulkan_result.returncode != 0:
raise RuntimeError(
"vulkaninfo failed to run, please ask your administrator to"
" install `vulkaninfo` (e.g. on Ubuntu systems this requires running"
" `sudo apt install vulkan-tools`)."
)
current_gpu = None
device_uuid_to_vulkan_gpu_index = {}
for l in vulkan_result.stdout.splitlines():
gpu_match = re.match("GPU([0-9]+):", l)
if gpu_match is not None:
current_gpu = int(gpu_match.group(1))
elif "deviceUUID" in l:
device_uuid = l.split("=")[1].strip()
if device_uuid in device_uuid_to_vulkan_gpu_index:
assert current_gpu == device_uuid_to_vulkan_gpu_index[device_uuid]
else:
device_uuid_to_vulkan_gpu_index[device_uuid] = current_gpu
nvidiasmi_result = None
try:
nvidiasmi_result = subprocess.run(
["nvidia-smi", "-L"],
stdout=subprocess.PIPE,
stderr=subprocess.DEVNULL,
universal_newlines=True
)
except FileNotFoundError:
pass
if nvidiasmi_result is None or nvidiasmi_result.returncode != 0:
raise RuntimeError(
"`nvidia-smi` failed to run. To use CloudRendering, please ensure you have nvidia GPUs"
" installed."
)
nvidia_gpu_index_to_device_uuid = {}
for l in nvidiasmi_result.stdout.splitlines():
gpu_match = re.match("GPU ([0-9]+):", l)
if gpu_match is None:
continue
current_gpu = int(gpu_match.group(1))
uuid_match = re.match(".*\\(UUID: GPU-([^)]+)\\)", l)
nvidia_gpu_index_to_device_uuid[current_gpu] = uuid_match.group(1)
cuda_vulkan_mapping = {}
for cuda_gpu_index, device_uuid in nvidia_gpu_index_to_device_uuid.items():
if device_uuid not in device_uuid_to_vulkan_gpu_index:
raise RuntimeError(
f"Could not find a Vulkan device corresponding"
f" to the CUDA device with UUID {device_uuid}."
)
cuda_vulkan_mapping[cuda_gpu_index] = device_uuid_to_vulkan_gpu_index[device_uuid]
with open(cuda_vulkan_mapping_path, "w") as f:
json.dump(cuda_vulkan_mapping, f)
else:
with open(cuda_vulkan_mapping_path, "r") as f:
# JSON dictionaries always have strings as keys, need to re-map here
cuda_vulkan_mapping = {int(k):v for k, v in json.load(f).items()}
command += f" -force-device-index {cuda_vulkan_mapping[self.gpu_device]}"
return shlex.split(command)
def _start_unity_thread(self, env, width, height, server_params, image_name):
# get environment variables
env["AI2THOR_CLIENT_TOKEN"] = self.server.client_token = str(uuid.uuid4())
env["AI2THOR_SERVER_TYPE"] = self.server.server_type
env["AI2THOR_SERVER_SIDE_SCREENSHOT"] = "False" if self.headless else "True"
for k, v in server_params.items():
env["AI2THOR_" + k.upper()] = str(v)
# print("Viewer: http://%s:%s/viewer" % (host, port))
command = self.unity_command(width, height, self.headless)
env.update(
self._build.platform.launch_env(self.width, self.height, self.x_display)
)
makedirs(self.log_dir)
extra_args={}
if os.name == 'nt':
extra_args = dict(shell=True)
self.server.unity_proc = proc = subprocess.Popen(
command,
env=env,
stdout=open(os.path.join(self.log_dir, "unity.log"), "a"),
stderr=open(os.path.join(self.log_dir, "unity.log"), "a"),
**extra_args
)
try:
if self._build.platform == ai2thor.platform.CloudRendering:
# if Vulkan is not configured correctly then Unity will crash
# immediately after launching
self.server.unity_proc.wait(timeout=1.0)
if self.server.unity_proc.returncode is not None:
message = (
"Unity process has exited - check "
"~/.config/unity3d/Allen\ Institute\ for\ "
"Artificial\ Intelligence/AI2-THOR/Player.log for errors. "
"Confirm that Vulkan is properly configured on this system "
"using vulkaninfo from the vulkan-utils package. returncode=%s"
% (self.server.unity_proc.returncode,)
)
raise Exception(message)
except subprocess.TimeoutExpired:
pass
self.unity_pid = proc.pid
atexit.register(lambda: self.server.stop())
@property
def tmp_dir(self):
return os.path.join(self.base_dir, "tmp")
@property
def releases_dir(self):
return os.path.join(self.base_dir, "releases")
@property
def cache_dir(self):
return os.path.join(self.base_dir, "cache")
@property
def commits_cache_dir(self):
return os.path.join(self.cache_dir, "commits")
@property
def base_dir(self):
return os.path.join(os.path.expanduser("~"), ".ai2thor")
@property
def log_dir(self):
return os.path.join(self.base_dir, "log")
def _cache_commit_filename(self, branch):
encoded_branch = re.sub(r"[^a-zA-Z0-9_\-.]", "_", re.sub("_", "__", branch))
return os.path.join(self.commits_cache_dir, encoded_branch + ".json")
def _cache_commit_history(self, branch, payload):
makedirs(self.commits_cache_dir)
cache_filename = self._cache_commit_filename(branch)
atomic_write(cache_filename, json.dumps(payload))
def _get_cache_commit_history(self, branch):
cache_filename = self._cache_commit_filename(branch)
payload = None
mtime = None
if os.path.exists(cache_filename):
mtime = os.stat(cache_filename).st_mtime
with open(cache_filename, "r") as f:
payload = json.loads(f.read())
return (payload, mtime)
def _branch_commits(self, branch):
import requests
payload = []
makedirs(self.commits_cache_dir) # must make directory for lock to succeed
# must lock to handle case when multiple procs are started
# and all fetch from api.github.com
with LockEx(self._cache_commit_filename(branch)):
cache_payload, cache_mtime = self._get_cache_commit_history(branch)
# we need to limit how often we hit api.github.com since
# there is a rate limit of 60 per hour per IP
if cache_payload and (time.time() - cache_mtime) < 300:
payload = cache_payload
else:
try:
res = requests.get(
"https://api.github.com/repos/allenai/ai2thor/commits?sha=%s"
% branch
)
if res.status_code == 404:
raise ValueError("Invalid branch name: %s" % branch)
elif res.status_code == 403:
payload, _ = self._get_cache_commit_history(branch)
if payload:
warnings.warn(
"Error retrieving commits: %s - using cached commit history for %s"
% (res.text, branch)
)
else:
res.raise_for_status()
elif res.status_code == 200:
payload = res.json()
self._cache_commit_history(branch, payload)
else:
res.raise_for_status()
except requests.exceptions.ConnectionError as e:
payload, _ = self._get_cache_commit_history(branch)
if payload:
warnings.warn(
"Unable to connect to github.com: %s - using cached commit history for %s"
% (e, branch)
)
else:
raise Exception(
"Unable to get commit history for branch %s and no cached history exists: %s"
% (branch, e)
)
return [c["sha"] for c in payload]
def local_commits(self):
git_dir = os.path.normpath(
os.path.dirname(os.path.realpath(__file__)) + "/../.git"
)
commits = (
subprocess.check_output(
"git --git-dir=" + git_dir + " log -n 10 --format=%H", shell=True
)
.decode("ascii")
.strip()
.split("\n")
)
return commits
def find_build(self, local_build, commit_id, branch, platform):
releases_dir = self.releases_dir
if platform_architecture()[0] != "64bit":
raise Exception("Only 64bit currently supported")
if branch:
commits = self._branch_commits(branch)
elif commit_id:
commits = [commit_id]
else:
commits = self.local_commits()
if local_build:
releases_dir = os.path.normpath(
os.path.dirname(os.path.realpath(__file__)) + "/../unity/builds"
)
commits = [
ai2thor.build.LOCAL_BUILD_COMMIT_ID
] + commits # we add the commits to the list to allow the ci_build to succeed
request = ai2thor.platform.Request(
platform_system(), self.width, self.height, self.x_display, self.headless
)
if platform is None:
candidate_platforms = ai2thor.platform.select_platforms(request)
else:
candidate_platforms = [platform]
builds = self.find_platform_builds(
candidate_platforms, request, commits, releases_dir, local_build
)
if not builds:
platforms_message = ",".join(map(lambda p: p.name(), candidate_platforms))
if commit_id:
raise ValueError(
"Invalid commit_id: %s - no build exists for arch=%s platforms=%s"
% (commit_id, platform_system(), platforms_message)
)
else:
raise Exception(
"No build exists for arch=%s platforms=%s and commits: %s"
% (
platform_system(),
platforms_message,
", ".join(map(lambda x: x[:8], commits)),
)
)
# select the first build + platform that succeeds
# For Linux, this will select Linux64 (X11). If CloudRendering
# is enabled, then it will get selected over Linux64 (assuming a build is available)
for build in builds:
if build.platform.is_valid(request):
# don't emit warnings for CloudRendering since we allow it to fallback to a default
if (
build.commit_id != commits[0]
and build.platform != ai2thor.platform.CloudRendering
):
warnings.warn(
"Build for the most recent commit: %s is not available. Using commit build %s"
% (commits[0], build.commit_id)
)
return build
error_messages = [
"The following builds were found, but had missing dependencies. Only one valid platform is required to run AI2-THOR."
]
for build in builds:
errors = build.platform.validate(request)
message = (
"Platform %s failed validation with the following errors: %s\n "
% (
build.platform.name(),
"\t\n".join(errors),
)
)
instructions = build.platform.dependency_instructions(request)
if instructions:
message += instructions
error_messages.append(message)
raise Exception("\n".join(error_messages))
def find_platform_builds(
self, candidate_platforms, request, commits, releases_dir, local_build
):
builds = []
for plat in candidate_platforms:
for commit_id in commits:
commit_build = ai2thor.build.Build(
plat, commit_id, self.include_private_scenes, releases_dir
)
try:
if os.path.isdir(commit_build.base_dir) or (
not local_build and commit_build.exists()
):
builds.append(commit_build)
# break out of commit loop, but allow search through all the platforms
break
except Exception:
pass
# print("Got build for %s: " % (found_build.url))
return builds
def start(
self,
port=0,
start_unity=True,
width=300,
height=300,
x_display=None,
host="127.0.0.1",
player_screen_width=None,
player_screen_height=None,
):
self._build_server(host, port, width, height)
if "AI2THOR_VISIBILITY_DISTANCE" in os.environ:
warnings.warn(
"AI2THOR_VISIBILITY_DISTANCE environment variable is deprecated, use \
the parameter visibilityDistance parameter with the Initialize action instead"
)
if player_screen_width is not None:
warnings.warn(
"'player_screen_width' parameter is deprecated, use the 'width'"
" parameter instead."
)
width = player_screen_width
if player_screen_height is not None:
warnings.warn(
"'player_screen_height' parameter is deprecated, use the 'height'"
" parameter instead."
)
height = player_screen_height
if height <= 0 or width <= 0:
raise Exception("Screen resolution must be > 0x0")
if self.server.started:
warnings.warn(
"start method depreciated. The server started when the Controller was initialized."
)
# Stops the current server and creates a new one. This is done so
# that the arguments passed in will be used on the server.
self.stop()
env = os.environ.copy()
image_name = None
self.server.start()
if start_unity:
self._build.lock_sh()
self.prune_releases()
unity_params = self.server.unity_params()
self._start_unity_thread(env, width, height, unity_params, image_name)
# receive the first request
self.last_event = self.server.receive(timeout=self.server_start_timeout)
# we should be able to get rid of this since we check the resolution in .reset()
if self.server.unity_proc is not None and (height < 300 or width < 300):
self.last_event = self.step("ChangeResolution", x=width, y=height)
return self.last_event
def stop(self):
self.stop_unity()
self.server.stop()
self._build.unlock()
def stop_unity(self):
if self.unity_pid and process_alive(self.unity_pid):
self.killing_unity = True
proc = self.server.unity_proc
for i in range(4):
if not process_alive(proc.pid):
break
try:
proc.kill()
proc.wait(1)
except subprocess.TimeoutExpired:
pass
class BFSSearchPoint:
def __init__(
self, start_position, move_vector, heading_angle=0.0, horizon_angle=0.0
):
self.start_position = start_position
self.move_vector = defaultdict(lambda: 0.0)
self.move_vector.update(move_vector)
self.heading_angle = heading_angle
self.horizon_angle = horizon_angle
def target_point(self):
x = self.start_position["x"] + self.move_vector["x"]
z = self.start_position["z"] + self.move_vector["z"]
return dict(x=x, z=z)
class BFSController(Controller):
def __init__(self, grid_size=0.25):
super(BFSController, self).__init__()
self.rotations = [0, 90, 180, 270]
self.horizons = [330, 0, 30]
self.allow_enqueue = True
self.queue = deque()
self.seen_points = []
self.visited_seen_points = []
self.grid_points = []
self.grid_size = grid_size
self._check_visited = False
self.distance_threshold = self.grid_size / 5.0
def visualize_points(self, scene_name, wait_key=10):
import cv2
points = set()
xs = []
zs = []
# Follow the file as it grows
for point in self.grid_points:
xs.append(point["x"])
zs.append(point["z"])
points.add(str(point["x"]) + "," + str(point["z"]))
image_width = 470
image_height = 530
image = np.zeros((image_height, image_width, 3), np.uint8)
if not xs:
return
min_x = min(xs) - 1
max_x = max(xs) + 1
min_z = min(zs) - 1
max_z = max(zs) + 1
for point in list(points):
x, z = map(float, point.split(","))
circle_x = round(((x - min_x) / float(max_x - min_x)) * image_width)
z = (max_z - z) + min_z
circle_y = round(((z - min_z) / float(max_z - min_z)) * image_height)
cv2.circle(image, (circle_x, circle_y), 5, (0, 255, 0), -1)
cv2.imshow(scene_name, image)
cv2.waitKey(wait_key)
def has_islands(self):
queue = []
seen_points = set()
mag = self.grid_size
def enqueue_island_points(p):
if json.dumps(p) in seen_points:
return
queue.append(dict(z=p["z"] + mag, x=p["x"]))
queue.append(dict(z=p["z"] - mag, x=p["x"]))
queue.append(dict(z=p["z"], x=p["x"] + mag))
queue.append(dict(z=p["z"], x=p["x"] - mag))
seen_points.add(json.dumps(p))
enqueue_island_points(self.grid_points[0])
while queue:
point_to_find = queue.pop()
for p in self.grid_points:
dist = math.sqrt(
((point_to_find["x"] - p["x"]) ** 2)
+ ((point_to_find["z"] - p["z"]) ** 2)
)
if dist < 0.05:
enqueue_island_points(p)
return len(seen_points) != len(self.grid_points)
def build_graph(self):
import networkx as nx
graph = nx.Graph()
for point in self.grid_points:
self._build_graph_point(graph, point)
return graph
def key_for_point(self, point):
return "{x:0.3f}|{z:0.3f}".format(**point)
def _build_graph_point(self, graph, point):
for p in self.grid_points:
dist = math.sqrt(
((point["x"] - p["x"]) ** 2) + ((point["z"] - p["z"]) ** 2)
)
if dist <= (self.grid_size + 0.01) and dist > 0:
graph.add_edge(self.key_for_point(point), self.key_for_point(p))
def move_relative_points(self, all_points, graph, position, rotation):
action_orientation = {
0: dict(x=0, z=1, action="MoveAhead"),
90: dict(x=1, z=0, action="MoveRight"),
180: dict(x=0, z=-1, action="MoveBack"),
270: dict(x=-1, z=0, action="MoveLeft"),
}
move_points = dict()
for n in graph.neighbors(self.key_for_point(position)):
point = all_points[n]
x_o = round((point["x"] - position["x"]) / self.grid_size)
z_o = round((point["z"] - position["z"]) / self.grid_size)
for target_rotation, offsets in action_orientation.items():
delta = round(rotation + target_rotation) % 360
ao = action_orientation[delta]
action_name = action_orientation[target_rotation]["action"]
if x_o == ao["x"] and z_o == ao["z"]:
move_points[action_name] = point
break
return move_points
def plan_horizons(self, agent_horizon, target_horizon):
actions = []
horizon_step_map = {330: 3, 0: 2, 30: 1, 60: 0}
look_diff = (
horizon_step_map[int(agent_horizon)] - horizon_step_map[int(target_horizon)]
)
if look_diff > 0:
for i in range(look_diff):
actions.append(dict(action="LookDown"))
else:
for i in range(abs(look_diff)):
actions.append(dict(action="LookUp"))
return actions
def plan_rotations(self, agent_rotation, target_rotation):
right_diff = target_rotation - agent_rotation
if right_diff < 0:
right_diff += 360
right_steps = right_diff / 90
left_diff = agent_rotation - target_rotation
if left_diff < 0:
left_diff += 360
left_steps = left_diff / 90
actions = []
if right_steps < left_steps:
for i in range(int(right_steps)):
actions.append(dict(action="RotateRight"))
else:
for i in range(int(left_steps)):
actions.append(dict(action="RotateLeft"))
return actions
def shortest_plan(self, graph, agent, target):
import networkx as nx
path = nx.shortest_path(
graph,
self.key_for_point(agent["position"]),
self.key_for_point(target["position"]),
)
actions = []
all_points = {}
for point in self.grid_points:
all_points[self.key_for_point(point)] = point
# assert all_points[path[0]] == agent['position']
current_position = agent["position"]
current_rotation = agent["rotation"]["y"]
for p in path[1:]:
inv_pms = {
self.key_for_point(v): k
for k, v in self.move_relative_points(
all_points, graph, current_position, current_rotation
).items()
}
actions.append(dict(action=inv_pms[p]))
current_position = all_points[p]
actions += self.plan_horizons(agent["cameraHorizon"], target["cameraHorizon"])
actions += self.plan_rotations(agent["rotation"]["y"], target["rotation"]["y"])
# self.visualize_points(path)
return actions
def enqueue_point(self, point):
# ensure there are no points near the new point
if self._check_visited or not any(
map(
lambda p: distance(p, point.target_point()) < self.distance_threshold,
self.seen_points,
)
):
self.seen_points.append(point.target_point())
self.queue.append(point)
def enqueue_points(self, agent_position):
if not self.allow_enqueue:
return
if not self._check_visited or not any(
map(
lambda p: distance(p, agent_position) < self.distance_threshold,
self.visited_seen_points,
)
):
self.enqueue_point(
BFSSearchPoint(agent_position, dict(x=-1 * self.grid_size))
)
self.enqueue_point(BFSSearchPoint(agent_position, dict(x=self.grid_size)))
self.enqueue_point(
BFSSearchPoint(agent_position, dict(z=-1 * self.grid_size))
)
self.enqueue_point(
BFSSearchPoint(agent_position, dict(z=1 * self.grid_size))
)
self.visited_seen_points.append(agent_position)
def search_all_closed(self, scene_name):
self.allow_enqueue = True
self.queue = deque()
self.seen_points = []
self.visited_seen_points = []
self.grid_points = []
self.reset(scene_name)
event = self.step(dict(action="Initialize", gridSize=self.grid_size))
self.enqueue_points(event.metadata["agent"]["position"])
while self.queue:
self.queue_step()
# self.visualize_points(scene_name)
def start_search(
self,
scene_name,
random_seed,
full_grid,
current_receptacle_object_pairs,
randomize=True,
):
self.seen_points = []
self.visited_seen_points = []
self.queue = deque()
self.grid_points = []
# we only search a pre-defined grid with all the cabinets/fridges closed
# then keep the points that can still be reached
self.allow_enqueue = True
for gp in full_grid:
self.enqueue_points(gp)
self.allow_enqueue = False
self.reset(scene_name)
receptacle_object_pairs = []
for op in current_receptacle_object_pairs:
object_id, receptacle_object_id = op.split("||")
receptacle_object_pairs.append(
dict(receptacleObjectId=receptacle_object_id, objectId=object_id)
)
if randomize:
self.random_initialize(
random_seed=random_seed,
unique_object_types=True,
exclude_receptacle_object_pairs=receptacle_object_pairs,
)
# there is some randomization in initialize scene
# and if a seed is passed in this will keep it
# deterministic
if random_seed is not None:
random.seed(random_seed)
self.initialize_scene()
while self.queue:
self.queue_step()
# self.visualize_points(scene_name)
self.prune_points()
# self.visualize_points(scene_name)
# get rid of unreachable points
def prune_points(self):
final_grid_points = set()
for gp in self.grid_points:
final_grid_points.add(key_for_point(gp["x"], gp["z"]))
pruned_grid_points = []
for gp in self.grid_points:
found = False
for x in [1, -1]:
found |= (
key_for_point(gp["x"] + (self.grid_size * x), gp["z"])
in final_grid_points
)
for z in [1, -1]:
found |= (
key_for_point(gp["x"], (self.grid_size * z) + gp["z"])
in final_grid_points
)
if found:
pruned_grid_points.append(gp)
self.grid_points = pruned_grid_points
def is_object_visible(self, object_id):
for obj in self.last_event.metadata["objects"]:
if obj["objectId"] == object_id and obj["visible"]:
return True
return False
def find_visible_receptacles(self):
receptacle_points = []
receptacle_pivot_points = []
# pickup all objects
visibility_object_id = None
visibility_object_types = ["Mug", "CellPhone"]
for obj in self.last_event.metadata["objects"]:
if obj["pickupable"]:
self.step(
action=dict(
action="PickupObject",
objectId=obj["objectId"],
forceVisible=True,
)
)
if (
visibility_object_id is None
and obj["objectType"] in visibility_object_types
):
visibility_object_id = obj["objectId"]
for point in self.grid_points:
self.step(
dict(action="Teleport", x=point["x"], y=point["y"], z=point["z"]),
raise_for_failure=True,
)
for rot, hor in product(self.rotations, self.horizons):
event = self.step(
dict(action="RotateLook", rotation=rot, horizon=hor),
raise_for_failure=True,
)
for j in event.metadata["objects"]:
if j["receptacle"] and j["visible"]:
receptacle_points.append(
dict(
distance=j["distance"],
pivotId=0,
receptacleObjectId=j["objectId"],
searchNode=dict(
horizon=hor,
rotation=rot,
openReceptacle=False,
pivotId=0,
receptacleObjectId="",
x=point["x"],
y=point["y"],
z=point["z"],
),
)
)
if j["openable"]:
self.step(
action=dict(
action="OpenObject",
forceVisible=True,
objectId=j["objectId"],
),
raise_for_failure=True,
)
for pivot_id in range(j["receptacleCount"]):
self.step(
action=dict(
action="Replace",
forceVisible=True,
receptacleObjectId=j["objectId"],
objectId=visibility_object_id,
pivot=pivot_id,
),
raise_for_failure=True,
)
if self.is_object_visible(visibility_object_id):
receptacle_pivot_points.append(
dict(
distance=j["distance"],
pivotId=pivot_id,
receptacleObjectId=j["objectId"],
searchNode=dict(
horizon=hor,
rotation=rot,
openReceptacle=j["openable"],
pivotId=pivot_id,
receptacleObjectId=j["objectId"],
x=point["x"],
y=point["y"],
z=point["z"],
),
)
)
if j["openable"]:
self.step(
action=dict(
action="CloseObject",
forceVisible=True,
objectId=j["objectId"],
),
raise_for_failure=True,
)
return receptacle_pivot_points, receptacle_points
def find_visible_objects(self):
seen_target_objects = defaultdict(list)
for point in self.grid_points:
self.step(
dict(action="Teleport", x=point["x"], y=point["y"], z=point["z"]),
raise_for_failure=True,
)
for rot, hor in product(self.rotations, self.horizons):
event = self.step(
dict(action="RotateLook", rotation=rot, horizon=hor),
raise_for_failure=True,
)
object_receptacle = dict()
for obj in event.metadata["objects"]:
if obj["receptacle"]:
for pso in obj["pivotSimObjs"]:
object_receptacle[pso["objectId"]] = obj
for obj in filter(
lambda x: x["visible"] and x["pickupable"],
event.metadata["objects"],
):
# if obj['objectId'] in object_receptacle and\
# object_receptacle[obj['objectId']]['openable'] and not \
# object_receptacle[obj['objectId']]['isopen']:
# continue
seen_target_objects[obj["objectId"]].append(
dict(distance=obj["distance"], agent=event.metadata["agent"])
)
return seen_target_objects
def initialize_scene(self):
self.target_objects = []
self.object_receptacle = defaultdict(
lambda: dict(objectId="StartupPosition", pivotSimObjs=[])
)
self.open_receptacles = []
open_pickupable = {}
pickupable = {}
is_open = {}
for obj in filter(
lambda x: x["receptacle"], self.last_event.metadata["objects"]
):
for oid in obj["receptacleObjectIds"]:
self.object_receptacle[oid] = obj
is_open[obj["objectId"]] = obj["openable"] and obj["isOpen"]
for obj in filter(
lambda x: x["receptacle"], self.last_event.metadata["objects"]
):
for oid in obj["receptacleObjectIds"]:
if obj["openable"] or (
obj["objectId"] in self.object_receptacle
and self.object_receptacle[obj["objectId"]]["openable"]
):
open_pickupable[oid] = obj["objectId"]
else:
pickupable[oid] = obj["objectId"]
if open_pickupable.keys():
            self.target_objects = random.sample(list(open_pickupable.keys()), k=1)
shuffled_keys = list(open_pickupable.keys())
random.shuffle(shuffled_keys)
for oid in shuffled_keys:
position_target = self.object_receptacle[self.target_objects[0]][
"position"
]
position_candidate = self.object_receptacle[oid]["position"]
dist = math.sqrt(
(position_target["x"] - position_candidate["x"]) ** 2
+ (position_target["y"] - position_candidate["y"]) ** 2
)
# try to find something that is far to avoid having the doors collide
if dist > 1.25:
self.target_objects.append(oid)
break
for roid in set(map(lambda x: open_pickupable[x], self.target_objects)):
if roid in is_open:
continue
self.open_receptacles.append(roid)
self.step(
dict(action="OpenObject", objectId=roid, forceVisible=True),
raise_for_failure=True,
)
def queue_step(self):
search_point = self.queue.popleft()
event = self.step(
dict(
action="Teleport",
x=search_point.start_position["x"],
y=search_point.start_position["y"],
z=search_point.start_position["z"],
)
)
if event.metadata["lastActionSuccess"]:
move_vec = search_point.move_vector
move_vec["moveMagnitude"] = self.grid_size
event = self.step(dict(action="Move", **move_vec))
if event.metadata["agent"]["position"]["y"] > 1.3:
# pprint(search_point.start_position)
# pprint(search_point.move_vector)
# pprint(event.metadata['agent']['position'])
raise Exception("**** got big point ")
self.enqueue_points(event.metadata["agent"]["position"])
if not any(
map(
lambda p: distance(p, event.metadata["agent"]["position"])
< self.distance_threshold,
self.grid_points,
)
):
self.grid_points.append(event.metadata["agent"]["position"])
return event
| ai2thor-main | ai2thor/controller.py |
# Copyright Allen Institute for Artificial Intelligence 2017
import requests
from progressbar import ProgressBar, Bar, Percentage, FileTransferSpeed
import hashlib
import logging
import ai2thor.build
logger = logging.getLogger(__name__)
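# download() streams the file at `url` in 1 KiB chunks while rendering a progress bar,
# accumulates the chunks in memory, verifies them against `sha256_digest` (raising on a
# mismatch), and returns the raw bytes. When include_private_scenes is set, the request
# is authenticated via ai2thor.build.boto_auth().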
def download(url, sha256_digest, include_private_scenes=False):
auth = None
if include_private_scenes:
auth = ai2thor.build.boto_auth()
logger.debug("Downloading file from %s" % url)
r = requests.get(url, stream=True, auth=auth)
r.raise_for_status()
size = int(r.headers["Content-Length"].strip())
total_bytes = 0
widgets = [
url.split("/")[-1],
": ",
Bar(marker="|", left="[", right=" "),
Percentage(),
" ",
FileTransferSpeed(),
"] of {0}MB".format(str(round(size / 1024 / 1024, 2))[:4]),
]
pbar = ProgressBar(widgets=widgets, maxval=size).start()
m = hashlib.sha256()
file_data = []
for buf in r.iter_content(1024):
if buf:
file_data.append(buf)
m.update(buf)
total_bytes += len(buf)
pbar.update(total_bytes)
pbar.finish()
if m.hexdigest() != sha256_digest:
raise Exception("Digest mismatch for url %s" % url)
return b"".join(file_data)
| ai2thor-main | ai2thor/downloader.py |
import os
import msgpack
import numpy as np
import requests
import cv2
import warnings
from pprint import pprint
import shutil
import copy
from ai2thor.server import Event, MultiAgentEvent, DepthFormat
from ai2thor.interact import InteractiveControllerPrompt, DefaultActions
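# NOTE: unlike ai2thor.controller.Controller, this controller does not launch a Unity
# process. It assumes an HTTP endpoint (e.g. a robot bridge, or the mock in
# ai2thor/mock_real_server.py) that accepts JSON-encoded actions on /step and replies
# with msgpack-encoded per-agent metadata plus image buffers.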
class Controller(object):
def __init__(
self,
headless=False,
host="127.0.0.1",
port=0,
width=300,
height=300,
agent_id=0,
image_dir=".",
save_image_per_frame=False,
depth_format=DepthFormat.Meters,
camera_near_plane=0.1,
camera_far_plane=20,
):
self.host = host
self.port = port
self.headless = headless
self.last_event = {}
self.last_action = {}
self.sequence_id = 0
self.agent_id = agent_id
self.screen_width = width
self.screen_height = height
self.depth_format = depth_format
        self.camera_near_plane = camera_near_plane
self.camera_far_plane = camera_far_plane
if image_dir != ".":
if os.path.exists(image_dir):
shutil.rmtree(image_dir)
os.makedirs(image_dir)
self.interactive_controller = InteractiveControllerPrompt(
[
DefaultActions.MoveAhead,
DefaultActions.MoveBack,
DefaultActions.RotateLeft,
DefaultActions.RotateRight,
DefaultActions.LookUp,
DefaultActions.LookDown,
],
image_dir=image_dir,
image_per_frame=save_image_per_frame,
)
self.start(port, host, agent_id=agent_id)
def stop(self):
pass
def start(self, port=9200, host="127.0.0.1", agent_id=0, **kwargs):
self.host = host
self.port = port
self.agent_id = agent_id
# response_payload = self._post_event('start')
pprint("-- Start:")
# pprint(response_payload)
def reset(self, scene_name=None):
self.sequence_id = 0
# response_payload = self._post_event(
# 'reset', dict(action='Reset', sceneName=scene_name, sequenceId=self.sequence_id)
# )
pprint("-- Reset:")
# pprint(response_payload)
return self.last_event
def step(self, action=None, **action_args):
if type(action) is dict:
action = copy.deepcopy(action) # prevent changes from leaking
else:
action = dict(action=action)
raise_for_failure = action_args.pop("raise_for_failure", False)
action.update(action_args)
if self.headless:
action["renderImage"] = False
action["sequenceId"] = self.sequence_id
action["agentId"] = self.agent_id
self.last_action = action
rotation = action.get("rotation")
if rotation is not None and type(rotation) != dict:
action["rotation"] = {}
action["rotation"]["y"] = rotation
payload = self._post_event("step", action)
events = []
for i, agent_metadata in enumerate(payload["metadata"]["agents"]):
event = Event(agent_metadata)
third_party_width = event.screen_width
third_party_height = event.screen_height
third_party_depth_width = event.screen_width
third_party_depth_height = event.screen_height
if 'thirdPartyCameras' in agent_metadata and \
len(agent_metadata['thirdPartyCameras']) > 0 and \
'screenWidth' in agent_metadata['thirdPartyCameras'][0] and \
'screenHeight' in agent_metadata['thirdPartyCameras'][0]:
third_party_width = agent_metadata['thirdPartyCameras'][0]['screenWidth']
third_party_height = agent_metadata['thirdPartyCameras'][0]['screenHeight']
third_party_depth_width = agent_metadata['thirdPartyCameras'][0]['depthWidth']
third_party_depth_height = agent_metadata['thirdPartyCameras'][0]['depthHeight']
image_mapping = {
'image':lambda x: event.add_image(x, flip_y=False, flip_rb_colors=False),
'image-thirdParty-camera': lambda x: event.add_third_party_camera_image_robot(x, third_party_width, third_party_height),
'image_thirdParty_depth': lambda x: event.add_third_party_image_depth_robot(x, dtype=np.float64, flip_y=False, depth_format=self.depth_format, depth_width=third_party_depth_width, depth_height=third_party_depth_height),
'image_depth':lambda x: event.add_image_depth_robot(
x,
self.depth_format,
camera_near_plane=self.camera_near_plane,
camera_far_plane=self.camera_far_plane,
depth_width=agent_metadata.get('depthWidth', agent_metadata['screenWidth']),
depth_height=agent_metadata.get('depthHeight', agent_metadata['screenHeight']),
flip_y=False,
dtype=np.float64,
),
}
for key in image_mapping.keys():
if key == 'image_depth' and 'depthWidth' in agent_metadata and agent_metadata['depthWidth'] != agent_metadata['screenWidth']:
warnings.warn("Depth and RGB images are not the same resolutions")
if key in payload and len(payload[key]) > i:
image_mapping[key](payload[key][i])
events.append(event)
if len(events) > 1:
self.last_event = MultiAgentEvent(self.agent_id, events)
else:
self.last_event = events[0]
if (
not self.last_event.metadata["lastActionSuccess"]
and self.last_event.metadata["errorCode"] == "InvalidAction"
):
raise ValueError(self.last_event.metadata["errorMessage"])
if raise_for_failure:
assert self.last_event.metadata["lastActionSuccess"]
# pprint("Display event:")
# Controller._display_step_event(self.last_event)
return self.last_event
def interact(
self,
semantic_segmentation_frame=False,
instance_segmentation_frame=False,
depth_frame=False,
color_frame=False,
metadata=False,
):
self.interactive_controller.interact(
self,
semantic_segmentation_frame=semantic_segmentation_frame,
instance_segmentation_frame=instance_segmentation_frame,
depth_frame=depth_frame,
color_frame=color_frame,
metadata=metadata,
)
def _post_event(self, route="", data=None):
r = requests.post(self._get_url(route), json=data)
pprint('ACTION "{}"'.format(data["action"]))
pprint("POST")
# pprint(r.content)
pprint(r.status_code)
return msgpack.unpackb(r.content, raw=False)
def _get_url(self, route=""):
return "http://{}:{}{}".format(
self.host, self.port, "/{}".format(route) if route != "" else ""
)
@staticmethod
def _display_step_event(event):
metadata = event.metadata
pprint(metadata)
cv2.imshow("aoeu", event.cv2img)
cv2.waitKey(1000)
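# Illustrative usage sketch (assumes a robot-side HTTP endpoint is already
# listening on the given host/port and speaks the msgpack protocol expected by
# _post_event; the host and port values are placeholders):
#
#     controller = Controller(host="192.168.1.50", port=9200, width=300, height=300)
#     event = controller.step(action="MoveAhead", moveMagnitude=0.25)
#     print(event.metadata["lastActionSuccess"])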
| ai2thor-main | ai2thor/robot_controller.py |
# Copyright Allen Institute for Artificial Intelligence 2017
__version__ = None
try:
from ai2thor._version import __version__
except ImportError:
pass
| ai2thor-main | ai2thor/__init__.py |
import subprocess
import os.path
import re
import tempfile
from ai2thor.build import COMMIT_ID
import shlex
import shutil
def pci_records():
records = []
command = shlex.split("lspci -vmm")
output = subprocess.check_output(command).decode()
for devices in output.strip().split("\n\n"):
record = {}
records.append(record)
for row in devices.split("\n"):
key, value = row.split("\t")
record[key.split(":")[0]] = value
return records
def xorg_bus_id():
bus_id = None
for r in pci_records():
if r.get("Vendor", "") == "NVIDIA Corporation" and r["Class"] in [
"VGA compatible controller",
"3D controller",
]:
bus_id = "PCI:" + ":".join(
map(lambda x: str(int(x, 16)), re.split(r"[:\.]", r["Slot"]))
)
break
if bus_id is None:
raise Exception("no valid nvidia device could be found")
return bus_id
def has_docker():
with open(os.devnull, "w") as dn:
return subprocess.call(["which", "docker"], stdout=dn) == 0
def bridge_gateway():
output = (
subprocess.check_output(
"docker network inspect -f '{{range .IPAM.Config}}{{.Gateway}}{{end}}' bridge",
shell=True,
)
.decode("ascii")
.strip()
)
if re.match(r"^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$", output):
return output
else:
raise Exception(
"Didn't receive a single ip address from network inspect bridge: %s"
% output
)
def nvidia_version():
version = None
version_path = "/proc/driver/nvidia/version"
if os.path.isfile(version_path):
with open(version_path) as f:
for line in f:
if line.startswith("NVRM version: NVIDIA"):
match = re.search(r"Kernel Module\s+([0-9\.]+)\s+", line)
if match:
version = match.group(1)
break
return version
def generate_dockerfile(tag):
driver_url = "http://us.download.nvidia.com/XFree86/Linux-x86_64/{version}/NVIDIA-Linux-x86_64-{version}.run".format(
version=nvidia_version()
)
driver_filename = os.path.basename(driver_url)
dockerfile = """
FROM ai2thor/ai2thor-base:{tag}
RUN wget {driver_url} -P /root/
RUN sh /root/{driver_filename} -s --no-kernel-module
""".format(
driver_filename=driver_filename, driver_url=driver_url, tag=tag
)
return dockerfile
def image_exists(image_name):
output = subprocess.check_output("docker images -q %s" % image_name, shell=True)
return len(output) > 0
def run(image_name, base_dir, command, environment):
allowed_keys = {"AI2THOR_PORT", "AI2THOR_CLIENT_TOKEN", "AI2THOR_HOST"}
environment_string = ""
for k, v in environment.items():
if k in allowed_keys:
environment_string += " -e %s=%s " % (k, v)
relative_command = os.path.relpath(command, base_dir)
docker_command = os.path.join("/root/.ai2thor", relative_command)
environment_string += " -e %s=%s -e %s='%s'" % (
"AI2THOR_DEVICE_BUSID",
xorg_bus_id(),
"AI2THOR_COMMAND",
docker_command,
)
command = "docker run -v {base_dir}:/root/.ai2thor -d --privileged {environment} {image_name} /root/start.sh".format(
environment=environment_string, image_name=image_name, base_dir=base_dir
)
container_id = subprocess.check_output(command, shell=True).decode("ascii").strip()
return container_id
def kill_container(container_id):
subprocess.check_output("docker kill %s" % container_id, shell=True)
def build_image():
version = nvidia_version()
image_name = "ai2thor/ai2thor-nvidia-%s:%s" % (version, COMMIT_ID)
if image_exists(image_name):
return image_name
td = tempfile.mkdtemp()
with open("%s/Dockerfile" % td, "w") as f:
f.write(generate_dockerfile(COMMIT_ID))
subprocess.check_call("docker build --rm -t %s %s" % (image_name, td), shell=True)
shutil.rmtree(td)
return image_name
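# Illustrative sketch of how these helpers fit together (assumes a Linux host
# with the NVIDIA kernel driver loaded and Docker installed; the base_dir and
# command paths are placeholders):
#
#     if has_docker() and nvidia_version() is not None:
#         image = build_image()
#         container_id = run(
#             image,
#             base_dir="/home/user/.ai2thor",               # hypothetical path
#             command="/home/user/.ai2thor/releases/thor",  # hypothetical path
#             environment={"AI2THOR_PORT": "8200", "AI2THOR_HOST": bridge_gateway()},
#         )
#         # ... interact with the container ...
#         kill_container(container_id)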
| ai2thor-main | ai2thor/docker.py |
import Xlib.display
import glob
import warnings
import os
import ctypes.util
import xml.etree.ElementTree
class Request:
def __init__(self, system, width, height, x_display, headless):
self.system = system
self.width = width
self.height = height
self.x_display = x_display
self.headless = headless
class BasePlatform:
enabled = True
@classmethod
def validate(cls, r):
return []
@classmethod
def dependency_instructions(cls, request):
return None
@classmethod
def is_valid(cls, request):
return len(cls.validate(request)) == 0
@classmethod
def name(cls):
return cls.__name__
@classmethod
def launch_env(cls, width, height, x_display):
return {}
class BaseLinuxPlatform(BasePlatform):
@classmethod
def executable_path(cls, base_dir, name):
return os.path.join(base_dir, name)
@classmethod
def old_executable_path(cls, base_dir, name):
return cls.executable_path(base_dir, name)
class Linux64(BaseLinuxPlatform):
@classmethod
def dependency_instructions(cls, request):
message = "Linux64 requires a X11 server to be running with GLX. "
displays = cls._valid_x_displays(request.width, request.height)
if displays:
message += "The following valid displays were found %s" % (
", ".join(displays)
)
else:
message += "If you have a NVIDIA GPU, please run: sudo ai2thor-xorg start"
return message
@classmethod
def _select_x_display(cls, width, height):
valid_displays = cls._valid_x_displays(width, height)
if valid_displays:
return valid_displays[0]
else:
return None
@classmethod
def launch_env(cls, width, height, x_display):
env = dict(DISPLAY=x_display)
if env["DISPLAY"] is None:
env["DISPLAY"] = cls._select_x_display(width, height)
return env
@classmethod
def _validate_screen(cls, display_screen_str, width, height):
errors = []
try:
disp_screen = Xlib.display.Display(
display_screen_str
) # display_screen_str will have the format ":0.1"
screen_parts = display_screen_str.split(".")
if len(screen_parts) > 1:
# this Xlib.display will find a valid screen if an
# invalid screen was passed in (e.g. :0.9999999 -> :0.1)
if screen_parts[1] != str(disp_screen.get_default_screen()):
errors.append(
"Invalid display, non-existent screen: %s" % display_screen_str
)
if "GLX" not in disp_screen.list_extensions():
errors.append(
"Display %s does not have the GLX extension loaded. GLX is required by Unity3D."
% display_screen_str
)
if (
disp_screen.screen()["width_in_pixels"] < width
or disp_screen.screen()["height_in_pixels"] < height
):
errors.append(
"Display %s does not have a large enough resolution for the target resolution: %sx%s vs. %sx%s"
% (
display_screen_str,
width,
height,
disp_screen.screen()["width_in_pixels"],
disp_screen.screen()["height_in_pixels"],
)
)
if disp_screen.screen()["root_depth"] != 24:
errors.append(
"Display %s does not have a color depth of 24: %s"
% (display_screen_str, disp_screen.screen()["root_depth"])
)
except (Xlib.error.DisplayNameError, Xlib.error.DisplayConnectionError) as e:
errors.append(
"Invalid display: %s. Failed to connect %s " % (display_screen_str, e)
)
return errors
@classmethod
def _is_valid_screen(cls, display_screen_str, width, height):
return len(cls._validate_screen(display_screen_str, width, height)) == 0
@classmethod
def _valid_x_displays(cls, width, height):
open_display_strs = [
int(os.path.basename(s)[1:]) for s in glob.glob("/tmp/.X11-unix/X[0-9]*")
]
valid_displays = []
for display_str in open_display_strs:
try:
disp = Xlib.display.Display(":%s" % display_str)
for screen in range(0, disp.screen_count()):
disp_screen_str = ":%s.%s" % (display_str, screen)
if cls._is_valid_screen(disp_screen_str, width, height):
valid_displays.append(disp_screen_str)
except Xlib.error.DisplayConnectionError as e:
warnings.warn(
"could not connect to X Display: %s, %s" % (display_str, e)
)
return valid_displays
@classmethod
def validate(cls, request):
if request.headless:
return []
elif request.x_display:
return cls._validate_screen(
request.x_display, request.width, request.height
)
elif cls._select_x_display(request.width, request.height) is None:
return ["No valid X display found"]
else:
return []
class OSXIntel64(BasePlatform):
@classmethod
def old_executable_path(cls, base_dir, name):
return os.path.join(base_dir, name + ".app", "Contents/MacOS", name)
@classmethod
def executable_path(cls, base_dir, name):
plist = cls.parse_plist(base_dir, name)
return os.path.join(
base_dir, name + ".app", "Contents/MacOS", plist["CFBundleExecutable"]
)
@classmethod
def parse_plist(cls, base_dir, name):
plist_path = os.path.join(base_dir, name + ".app", "Contents/Info.plist")
with open(plist_path) as f:
plist = f.read()
root = xml.etree.ElementTree.fromstring(plist)
keys = [x.text for x in root.findall("dict/key")]
values = [x.text for x in root.findall("dict/string")]
return dict(zip(keys, values))
class CloudRendering(BaseLinuxPlatform):
enabled = True
@classmethod
def dependency_instructions(cls, request):
return "CloudRendering requires libvulkan1. Please install by running: sudo apt-get -y install libvulkan1"
@classmethod
def failure_message(cls):
pass
@classmethod
def validate(cls, request):
if ctypes.util.find_library("vulkan") is not None:
return []
else:
return ["Vulkan API driver missing."]
class WebGL(BasePlatform):
pass
class StandaloneWindows64(BasePlatform):
@classmethod
def executable_path(cls, base_dir, name):
return os.path.join(base_dir, name)
def select_platforms(request):
candidates = []
system_platform_map = dict(Linux=(Linux64,), Darwin=(OSXIntel64,), Windows=(StandaloneWindows64,))
for p in system_platform_map.get(request.system, ()):
if not p.enabled:
continue
#
# if p == CloudRendering and request.x_display is not None:
# continue
candidates.append(p)
return candidates
STR_PLATFORM_MAP = dict(
CloudRendering=CloudRendering, Linux64=Linux64, OSXIntel64=OSXIntel64, WebGL=WebGL, StandaloneWindows64=StandaloneWindows64
)
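# Illustrative usage sketch: select_platforms() only filters by operating system
# and the `enabled` flag, so callers still validate each candidate. Note that
# platform.system() comes from the stdlib `platform` module, which this file does
# not import itself (aliased here to avoid the name clash):
#
#     import platform as py_platform
#     req = Request(
#         system=py_platform.system(), width=300, height=300,
#         x_display=None, headless=False,
#     )
#     usable = [p for p in select_platforms(req) if p.is_valid(req)]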
| ai2thor-main | ai2thor/platform.py |
ai2thor-main | ai2thor/camera.py |
|
class UnityCrashException(Exception):
pass
class RestartError(RuntimeError):
pass
| ai2thor-main | ai2thor/exceptions.py |
import copy
import glob
import shutil
import json
import pickle
import ai2thor.controller
from collections import defaultdict
import os
MOVE_AHEAD_MAP = {
0: dict(x=0, z=1),
90: dict(x=1, z=0),
180: dict(x=0, z=-1),
270: dict(x=-1, z=0),
}
class Controller(object):
def __init__(self, base_dir, grid_size=0.25):
self.grid_size = grid_size
self.base_dir = base_dir
if grid_size < 0.25:
raise Exception(
"must adjust granularity of key_for_point for smaller grid sizes"
)
def reset(self, scene_name):
self.scene_name = scene_name
with open("%s/%s/index.json" % (self.base_dir, self.scene_name)) as f:
self.positions = json.loads(f.read())
# for g in glob.glob('%s/%s/metadata/*.json' % (self.base_dir,self.scene_name)):
# with open(g) as f:
# j = json.loads(f.read())
# pos = j['agent']['position']
# key = key_for_point(pos['x'], pos['z'])
# pos_id = os.path.splitext(os.path.basename(g))[0]
# event_path = os.path.join('%s/%s/events/%s.pickle' % (self.base_dir, self.scene_name, pos_id))
# self.positions[key].append({'event': event_path, 'metadata': j})
# import random
# total = len(self.positions)
# p = self.positions[list(self.positions.keys())[random.randint(0, total - 1)]][3]
# self.last_event = self.load_event(p)
@property
def position_x(self):
return self.last_event.metadata["agent"]["position"]["x"]
@property
def position_z(self):
return self.last_event.metadata["agent"]["position"]["z"]
@property
def rotation(self):
return self.last_event.metadata["agent"]["rotation"]["y"]
@property
def camera_horizon(self):
return self.last_event.metadata["agent"]["cameraHorizon"]
def start(self):
pass
def load_event(self, pos):
with open(pos["event"], "rb") as f:
e = pickle.load(f)
return e
def find_position(self, x, z, rotation, camera_horizon):
for p in self.positions.get(ai2thor.controller.key_for_point(x, z), []):
if (
abs(p["rotation"] - rotation) < 1.0
and abs(p["cameraHorizon"] - camera_horizon) < 1.0
):
event = self.load_event(p)
return event
return None
def move(self, x, z):
return self.find_position(x, z, self.rotation, self.camera_horizon)
def move_back(self):
m = MOVE_AHEAD_MAP[self.rotation]
new_x = (-m["x"] * self.grid_size) + self.position_x
new_z = (-m["z"] * self.grid_size) + self.position_z
return self.move(new_x, new_z)
def move_ahead(self):
m = MOVE_AHEAD_MAP[self.rotation]
new_x = (m["x"] * self.grid_size) + self.position_x
new_z = (m["z"] * self.grid_size) + self.position_z
return self.move(new_x, new_z)
def look(self, new_horizon):
if new_horizon < -30 or new_horizon > 30:
return None
else:
return self.find_position(
self.position_x, self.position_z, self.rotation, new_horizon
)
def look_up(self):
return self.look(self.camera_horizon - 30)
def look_down(self):
return self.look(self.camera_horizon + 30)
def rotate_right(self):
new_rotation = (self.rotation + 90) % 360
return self.rotate(new_rotation)
def rotate(self, new_rotation):
return self.find_position(
self.position_x, self.position_z, new_rotation, self.camera_horizon
)
def rotate_left(self):
new_rotation = (self.rotation - 90) % 360
return self.rotate(new_rotation)
def step(self, action=None, **action_args):
if type(action) is dict:
action = copy.deepcopy(action) # prevent changes from leaking
else:
action = dict(action=action)
actions = dict(
RotateRight=self.rotate_right,
RotateLeft=self.rotate_left,
MoveAhead=self.move_ahead,
MoveBack=self.move_back,
LookUp=self.look_up,
LookDown=self.look_down,
)
event = actions[action["action"]]()
if event is None:
event = copy.deepcopy(self.last_event)
event.metadata["lastActionSuccess"] = False
event.metadata["lastAction"] = action["action"]
self.last_event = event
return event
class FrameCounter:
def __init__(self):
self.counter = 0
def inc(self):
self.counter += 1
def write_frame(event, base_dir, scene_name, frame_name):
events_dir = "%s/%s/events" % (base_dir, scene_name)
met_dir = "%s/%s/metadata" % (base_dir, scene_name)
os.makedirs(met_dir, exist_ok=True)
os.makedirs(events_dir, exist_ok=True)
with open(events_dir + "/%03d.pickle" % frame_name, "wb") as f:
pickle.dump(event, f)
with open(met_dir + "/%03d.json" % frame_name, "w") as f:
f.write(json.dumps(event.metadata))
def look_up_down_write(controller, base_dir, fc, scene_name):
fc.inc()
write_frame(controller.step(action="LookUp"), base_dir, scene_name, fc.counter)
fc.inc()
write_frame(controller.step(action="LookDown"), base_dir, scene_name, fc.counter)
fc.inc()
write_frame(controller.step(action="LookDown"), base_dir, scene_name, fc.counter)
controller.step(action="LookUp")
def index_metadata(base_dir, scene_name):
positions_index = defaultdict(list)
for g in glob.glob("%s/%s/metadata/*.json" % (base_dir, scene_name)):
with open(g) as f:
j = json.loads(f.read())
agent = j["agent"]
pos = agent["position"]
key = ai2thor.controller.key_for_point(pos["x"], pos["z"])
pos_id = os.path.splitext(os.path.basename(g))[0]
event_path = os.path.join(
"%s/%s/events/%s.pickle" % (base_dir, scene_name, pos_id)
)
positions_index[key].append(
{
"event": event_path,
"rotation": agent["rotation"]["y"],
"cameraHorizon": agent["cameraHorizon"],
}
)
with open("%s/%s/index.json" % (base_dir, scene_name), "w") as f:
f.write(json.dumps(positions_index))
def dump_scene_controller(base_dir, controller):
if controller.last_event is None:
raise Exception("Controller must be reset and intialized to a scene")
scene_name = controller.last_event.metadata["sceneName"]
fc = FrameCounter()
shutil.rmtree("%s/%s" % (base_dir, scene_name), ignore_errors=True)
event = controller.step(action="GetReachablePositions")
for p in event.metadata["actionReturn"]:
action = copy.deepcopy(p)
action["action"] = "TeleportFull"
action["horizon"] = 0.0
action["forceAction"] = True
action["rotation"] = dict(y=0.0)
event = controller.step(action)
print(fc.counter)
if event.metadata["lastActionSuccess"]:
look_up_down_write(controller, base_dir, fc, scene_name)
for i in range(3):
controller.step(action="RotateRight")
look_up_down_write(controller, base_dir, fc, scene_name)
index_metadata(base_dir, scene_name)
def dump_scene(
scene_name,
base_dir,
renderInstanceSegmentation=False,
renderDepthImage=False,
renderSemanticSegmentation=False,
):
controller = ai2thor.controller.Controller()
controller.start(height=448, width=448)
controller.reset(scene_name)
controller.step(
dict(
action="Initialize",
fieldOfView=90,
gridSize=0.25,
renderDepthImage=renderDepthImage,
renderInstanceSegmentation=renderInstanceSegmentation,
renderSemanticSegmentation=renderSemanticSegmentation,
)
)
dump_scene_controller(base_dir, controller)
controller.stop()
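# Illustrative usage sketch (the directory path is a placeholder): dump_scene()
# writes pickled events plus an index.json under base_dir, and the offline
# Controller above replays them without launching Unity.
#
#     dump_scene("FloorPlan28", "/tmp/thor_offline", renderDepthImage=True)
#     offline = Controller(base_dir="/tmp/thor_offline")
#     offline.reset("FloorPlan28")
#     # last_event must be seeded (e.g. via find_position) before calling step()
#     event = offline.find_position(0.0, 0.0, rotation=0.0, camera_horizon=0.0)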
| ai2thor-main | ai2thor/offline_controller.py |
# GENERATED FILE - DO NOT EDIT
DEFAULT_QUALITY = 'Ultra'
QUALITY_SETTINGS = {'DONOTUSE': 0,
'High': 5,
'High WebGL': 8,
'Low': 2,
'Medium': 3,
'MediumCloseFitShadows': 4,
'Ultra': 7,
'Very High': 6,
'Very Low': 1} | ai2thor-main | ai2thor/_quality_settings.py |
# Copyright Allen Institute for Artificial Intelligence 2017
"""
ai2thor.server
Handles all communication with Unity through a Flask service. Messages
are sent to the controller using a pair of request/response queues.
"""
import json
import os
import select
import struct
import tempfile
import time
from collections import defaultdict
from enum import IntEnum, unique
from io import TextIOWrapper
from typing import Optional
import msgpack
import ai2thor.server
from ai2thor.exceptions import UnityCrashException
# FifoFields
@unique
class FieldType(IntEnum):
METADATA = 1
ACTION = 2
ACTION_RESULT = 3
RGB_IMAGE = 4
DEPTH_IMAGE = 5
NORMALS_IMAGE = 6
FLOWS_IMAGE = 7
CLASSES_IMAGE = 8
IDS_IMAGE = 9
THIRD_PARTY_IMAGE = 10
METADATA_PATCH = 11
THIRD_PARTY_DEPTH = 12
THIRD_PARTY_NORMALS = 13
THIRD_PARTY_IMAGE_IDS = 14
THIRD_PARTY_CLASSES = 15
THIRD_PARTY_FLOW = 16
END_OF_MESSAGE = 255
class FifoServer(ai2thor.server.Server):
header_format = "!BI"
header_size = struct.calcsize(header_format)
field_types = {f.value: f for f in FieldType}
server_type = "FIFO"
def __init__(
self,
width: int,
height: int,
timeout: Optional[float] = 100.0,
depth_format=ai2thor.server.DepthFormat.Meters,
add_depth_noise: bool = False,
):
self.tmp_dir = tempfile.TemporaryDirectory()
self.server_pipe_path = os.path.join(self.tmp_dir.name, "server.pipe")
self.client_pipe_path = os.path.join(self.tmp_dir.name, "client.pipe")
self.server_pipe: Optional[TextIOWrapper] = None
self.client_pipe: Optional[TextIOWrapper] = None
self.raw_metadata = None
self.raw_files = None
self._last_action_message = None
# allows us to map the enum to form field names
# for backwards compatibility
# this can be removed when the wsgi server is removed
self.form_field_map = {
FieldType.RGB_IMAGE: "image",
FieldType.DEPTH_IMAGE: "image_depth",
FieldType.CLASSES_IMAGE: "image_classes",
FieldType.IDS_IMAGE: "image_ids",
FieldType.NORMALS_IMAGE: "image_normals",
FieldType.FLOWS_IMAGE: "image_flow",
FieldType.THIRD_PARTY_IMAGE: "image-thirdParty-camera",
FieldType.THIRD_PARTY_DEPTH: "image_thirdParty_depth",
FieldType.THIRD_PARTY_NORMALS: "image_thirdParty_normals",
FieldType.THIRD_PARTY_IMAGE_IDS: "image_thirdParty_image_ids",
FieldType.THIRD_PARTY_CLASSES: "image_thirdParty_classes",
FieldType.THIRD_PARTY_FLOW: "image_thirdParty_flow",
}
self.image_fields = {
FieldType.IDS_IMAGE,
FieldType.CLASSES_IMAGE,
FieldType.FLOWS_IMAGE,
FieldType.NORMALS_IMAGE,
FieldType.DEPTH_IMAGE,
FieldType.RGB_IMAGE,
FieldType.THIRD_PARTY_IMAGE,
FieldType.THIRD_PARTY_DEPTH,
FieldType.THIRD_PARTY_NORMALS,
FieldType.THIRD_PARTY_IMAGE_IDS,
FieldType.THIRD_PARTY_CLASSES,
FieldType.THIRD_PARTY_FLOW,
}
self.eom_header = self._create_header(FieldType.END_OF_MESSAGE, b"")
super().__init__(
width=width,
height=height,
timeout=timeout,
depth_format=depth_format,
add_depth_noise=add_depth_noise
)
def _create_header(self, message_type, body):
return struct.pack(self.header_format, message_type, len(body))
def _read_with_timeout(self, server_pipe, message_size: int, timeout: Optional[float]):
if timeout is None:
return server_pipe.read(message_size)
start_t = time.time()
message = b""
while message_size > 0:
r, w, e = select.select([server_pipe], [], [], timeout)
if server_pipe in r:
part = os.read(server_pipe.fileno(), message_size)
message_size -= len(part)
message = message + part
cur_t = time.time()
if timeout is not None and cur_t - start_t > timeout:
break
if message_size != 0:
raise TimeoutError(f"Reading from AI2-THOR backend timed out (using {timeout}s) timeout.")
return message
def _recv_message(self, timeout: Optional[float]):
if self.server_pipe is None:
self.server_pipe = open(self.server_pipe_path, "rb")
metadata = None
files = defaultdict(list)
while True:
header = self._read_with_timeout(
server_pipe=self.server_pipe,
message_size=self.header_size,
timeout=self.timeout if timeout is None else timeout
) # message type + length
if len(header) == 0:
self.unity_proc.wait(timeout=5)
returncode = self.unity_proc.returncode
message = (
"Unity process has exited - check "
"~/.config/unity3d/Allen\ Institute\ for\ "
"Artificial\ Intelligence/AI2-THOR/Player.log for errors. "
f"Last action message: %s, returncode=%s"
% (self._last_action_message, self.unity_proc.returncode)
)
# we don't want to restart on every process exit since it's possible that a user
# kills off a Unity process with SIGTERM to end a training run
# SIGABRT (-6) and SIGSEGV (-11) are the return codes when Unity aborts or segfaults
if returncode in [-6, -11]: # SIGABRT, SIGSEGV
raise UnityCrashException(message)
else:
raise Exception(message)
if header[0] == FieldType.END_OF_MESSAGE.value:
# print("GOT EOM")
break
# print("got header %s" % header)
field_type_int, message_length = struct.unpack(self.header_format, header)
field_type = self.field_types[field_type_int]
body = self._read_with_timeout(
server_pipe=self.server_pipe,
message_size=message_length,
timeout=self.timeout if timeout is None else timeout
)
# print("field type")
# print(field_type)
if field_type is FieldType.METADATA:
# print("body length %s" % len(body))
# print(body)
metadata = msgpack.loads(body, raw=False, strict_map_key=False)
elif field_type is FieldType.METADATA_PATCH:
metadata_patch = msgpack.loads(body, raw=False, strict_map_key=False)
agents = self.raw_metadata["agents"]
metadata = dict(
agents=[{} for i in range(len(agents))],
thirdPartyCameras=self.raw_metadata["thirdPartyCameras"],
sequenceId=self.sequence_id,
activeAgentId=metadata_patch["agentId"],
)
for i in range(len(agents)):
metadata["agents"][i].update(agents[i])
metadata["agents"][metadata_patch["agentId"]].update(metadata_patch)
files = self.raw_files
elif field_type in self.image_fields:
files[self.form_field_map[field_type]].append(body)
else:
raise ValueError("Invalid field type: %s" % field_type)
self.raw_metadata = metadata
self.raw_files = files
return metadata, files
def _send_message(self, message_type, body):
# print("trying to write to ")
if self.client_pipe is None:
self.client_pipe = open(self.client_pipe_path, "wb")
header = self._create_header(message_type, body)
# print("len header %s" % len(header))
# print("sending body %s" % body)
# used for debugging in case of an error
self._last_action_message = body
self.client_pipe.write(header + body + self.eom_header)
self.client_pipe.flush()
def receive(self, timeout: Optional[float] = None):
metadata, files = self._recv_message(
timeout=self.timeout if timeout is None else timeout
)
if metadata is None:
raise ValueError("no metadata received from recv_message")
return self.create_event(metadata, files)
def send(self, action):
# print("got action to send")
if "sequenceId" in action:
self.sequence_id = action["sequenceId"]
else:
self.sequence_id += 1
action["sequenceId"] = self.sequence_id
# print(action)
# need to switch this to msgpack
self._send_message(
FieldType.ACTION,
json.dumps(action, cls=ai2thor.server.NumpyAwareEncoder).encode("utf8"),
)
def start(self):
os.mkfifo(self.server_pipe_path)
os.mkfifo(self.client_pipe_path)
self.started = True
# params to pass up to unity
def unity_params(self):
params = dict(
fifo_server_pipe_path=self.server_pipe_path,
fifo_client_pipe_path=self.client_pipe_path,
)
return params
def stop(self):
if self.client_pipe is not None:
self.client_pipe.close()
if self.server_pipe is not None:
self.server_pipe.close()
if self.unity_proc is not None and self.unity_proc.poll() is None:
self.unity_proc.kill()
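# Illustrative sketch of the FIFO wire format used above: each field is framed by
# a 5-byte "!BI" header (1-byte field type, 4-byte big-endian body length)
# followed by the body, and a message is terminated by an END_OF_MESSAGE header
# with an empty body.
#
#     import struct
#     body = b'{"action": "MoveAhead"}'
#     header = struct.pack(FifoServer.header_format, FieldType.ACTION, len(body))
#     framed = header + body + struct.pack(
#         FifoServer.header_format, FieldType.END_OF_MESSAGE, 0
#     )
#     assert struct.unpack(
#         FifoServer.header_format, framed[: FifoServer.header_size]
#     ) == (FieldType.ACTION, len(body))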
| ai2thor-main | ai2thor/fifo_server.py |
# Copyright Allen Institute for Artificial Intelligence 2017
"""
ai2thor.server
Handles all communication with Unity through a Flask service. Messages
are sent to the controller using a pair of request/response queues.
"""
import json
import logging
import math
import os
import threading
from typing import Optional
import ai2thor.server
from ai2thor.exceptions import UnityCrashException
try:
from queue import Queue, Empty
except ImportError:
from Queue import Queue, Empty
import time
from flask import Flask, request, make_response, abort
import werkzeug
import werkzeug.serving
import werkzeug.http
logging.getLogger("werkzeug").setLevel(logging.ERROR)
werkzeug.serving.WSGIRequestHandler.protocol_version = "HTTP/1.1"
# get with timeout to allow quit
def queue_get(que, unity_proc=None, timeout: Optional[float] = 100.0):
res = None
attempts = 0
queue_get_timeout_per_try = 0.5
max_attempts = (
float("inf")
if timeout is None or timeout == float("inf") else
max(int(math.ceil(timeout / queue_get_timeout_per_try)), 1)
)
while True:
try:
res = que.get(block=True, timeout=queue_get_timeout_per_try)
break
except Empty:
attempts += 1
# we poll here for the unity proc in the event that it has
# exited, otherwise we would wait indefinitely for the queue
if unity_proc:
if unity_proc.poll() is not None:
raise UnityCrashException(f"Unity process exited {unity_proc.returncode}")
# Quit if action takes more than `timeout` time to complete. Note that this
# will result in the controller being in an unrecoverable state.
if attempts >= max_attempts:
raise TimeoutError(
f"Could not get a message from the queue after {attempts} attempts "
)
return res
class BufferedIO(object):
def __init__(self, wfile):
self.wfile = wfile
self.data = []
def write(self, output):
self.data.append(output)
def flush(self):
self.wfile.write(b"".join(self.data))
self.wfile.flush()
def close(self):
return self.wfile.close()
@property
def closed(self):
return self.wfile.closed
class ThorRequestHandler(werkzeug.serving.WSGIRequestHandler):
def run_wsgi(self):
old_wfile = self.wfile
self.wfile = BufferedIO(self.wfile)
result = super(ThorRequestHandler, self).run_wsgi()
self.wfile = old_wfile
return result
class MultipartFormParser(object):
@staticmethod
def get_boundary(request_headers):
for h, value in request_headers:
if h == "Content-Type":
ctype, ct_opts = werkzeug.http.parse_options_header(value)
boundary = ct_opts["boundary"].encode("ascii")
return boundary
return None
def __init__(self, data, boundary):
self.form = {}
self.files = {}
full_boundary = b"--" + boundary
mid_boundary = b"\r\n" + full_boundary
view = memoryview(data)
i = data.find(full_boundary) + len(full_boundary)
while i >= 0:
next_offset = data.find(mid_boundary, i)
if next_offset < 0:
break
headers_offset = i + 2 # add 2 for CRLF
body_offset = data.find(b"\r\n\r\n", headers_offset)
raw_headers = view[headers_offset:body_offset]
body = view[body_offset + 4 : next_offset]
i = next_offset + len(mid_boundary)
headers = {}
for header in raw_headers.tobytes().decode("ascii").strip().split("\r\n"):
k, v = header.split(":")
headers[k.strip()] = v.strip()
ctype, ct_opts = werkzeug.http.parse_options_header(headers["Content-Type"])
cdisp, cd_opts = werkzeug.http.parse_options_header(
headers["Content-disposition"]
)
assert cdisp == "form-data"
if "filename" in cd_opts:
if cd_opts["name"] not in self.files:
self.files[cd_opts["name"]] = []
self.files[cd_opts["name"]].append(body)
else:
if ctype == "text/plain" and "charset" in ct_opts:
body = body.tobytes().decode(ct_opts["charset"])
if cd_opts["name"] not in self.form:
self.form[cd_opts["name"]] = []
self.form[cd_opts["name"]].append(body)
class WsgiServer(ai2thor.server.Server):
server_type = "WSGI"
def __init__(
self,
host,
timeout: Optional[float] = 100.0,
port=0,
threaded=False,
depth_format=ai2thor.server.DepthFormat.Meters,
add_depth_noise=False,
width=300,
height=300,
):
app = Flask(
__name__,
template_folder=os.path.realpath(
os.path.join(
os.path.dirname(os.path.abspath(__file__)), "..", "templates"
)
),
)
self.request_queue = Queue(maxsize=1)
self.response_queue = Queue(maxsize=1)
self.app = app
self.app.config.update(
PROPAGATE_EXCEPTIONS=False, JSONIFY_PRETTYPRINT_REGULAR=False
)
self.port = port
self.last_rate_timestamp = time.time()
self.frame_counter = 0
self.debug_frames_per_interval = 50
self.unity_proc = None
self.wsgi_server = werkzeug.serving.make_server(
host,
self.port,
self.app,
threaded=threaded,
request_handler=ThorRequestHandler,
)
self.stopped = False
# used to ensure that we are receiving frames for the action we sent
super().__init__(
width=width,
height=height,
timeout=timeout,
depth_format=depth_format,
add_depth_noise=add_depth_noise
)
@app.route("/ping", methods=["get"])
def ping():
return "pong"
@app.route("/train", methods=["post"])
def train():
action_returns = []
if request.headers["Content-Type"].split(";")[0] == "multipart/form-data":
form = MultipartFormParser(
request.get_data(),
MultipartFormParser.get_boundary(request.headers),
)
metadata = json.loads(form.form["metadata"][0])
# backwards compatibility
if (
"actionReturns" in form.form
and len(form.form["actionReturns"][0]) > 0
):
action_returns = json.loads(form.form["actionReturns"][0])
token = form.form["token"][0]
else:
form = request
metadata = json.loads(form.form["metadata"])
# backwards compatibility
if "actionReturns" in form.form and len(form.form["actionReturns"]) > 0:
action_returns = json.loads(form.form["actionReturns"])
token = form.form["token"]
if self.client_token and token != self.client_token:
abort(403)
if self.frame_counter % self.debug_frames_per_interval == 0:
now = time.time()
# rate = self.debug_frames_per_interval / float(now - self.last_rate_timestamp)
self.last_rate_timestamp = now
# import datetime
# print("%s %s/s" % (datetime.datetime.now().isoformat(), rate))
for i, a in enumerate(metadata["agents"]):
if "actionReturn" not in a and i < len(action_returns):
a["actionReturn"] = action_returns[i]
event = self.create_event(metadata, form.files)
self.request_queue.put_nowait(event)
self.frame_counter += 1
next_action = queue_get(self.response_queue, timeout=float("inf"))
if "sequenceId" not in next_action:
self.sequence_id += 1
next_action["sequenceId"] = self.sequence_id
else:
self.sequence_id = next_action["sequenceId"]
resp = make_response(
json.dumps(next_action, cls=ai2thor.server.NumpyAwareEncoder)
)
return resp
def _start_server_thread(self):
self.wsgi_server.serve_forever()
def start(self):
assert not self.stopped
self.started = True
self.server_thread = threading.Thread(target=self._start_server_thread)
self.server_thread.daemon = True
self.server_thread.start()
def receive(self, timeout: Optional[float] = None):
return queue_get(
self.request_queue,
unity_proc=self.unity_proc,
timeout=self.timeout if timeout is None else timeout
)
def send(self, action):
assert self.request_queue.empty()
self.response_queue.put_nowait(action)
# params to pass up to unity
def unity_params(self):
host, port = self.wsgi_server.socket.getsockname()
params = dict(host=host, port=str(port))
return params
def stop(self):
if self.started and not self.stopped:
self.send({})
self.wsgi_server.shutdown()  # ask the serve_forever loop in the server thread to exit
self.stopped = True
if self.unity_proc is not None and self.unity_proc.poll() is None:
self.unity_proc.kill()
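# Illustrative sketch of the multipart framing that MultipartFormParser expects
# (the body below is hand-built for illustration; real requests come from the
# Unity player):
#
#     boundary = b"BOUNDARY"
#     payload = (
#         b"--BOUNDARY\r\n"
#         b"Content-Type: text/plain; charset=utf-8\r\n"
#         b'Content-disposition: form-data; name="metadata"\r\n'
#         b"\r\n"
#         b'{"agents": []}\r\n'
#         b"--BOUNDARY--\r\n"
#     )
#     parsed = MultipartFormParser(payload, boundary)
#     # parsed.form["metadata"][0] -> '{"agents": []}'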
| ai2thor-main | ai2thor/wsgi_server.py |
import sys
import numpy as np
import os
from enum import Enum
import json
class DefaultActions(Enum):
MoveRight = (0,)
MoveLeft = (1,)
MoveAhead = (2,)
MoveBack = (3,)
LookUp = (4,)
LookDown = (5,)
RotateRight = (8,)
RotateLeft = 9
# TODO tie this with actions
# class ObjectActions(Enum):
# PutObject
# MoveHandAhead
# MoveHandBack
# MoveHandRight
# MoveHandLeft
# MoveHandUp
# MoveHandDown
# DropHandObject
# PickupObject,
# OpenObject,
# CloseObject,
# ToggleObjectOff
def get_term_character():
# NOTE: Leave these imports here! They are incompatible with Windows.
import tty
import termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class InteractiveControllerPrompt(object):
def __init__(
self,
default_actions,
has_object_actions=True,
image_dir=".",
image_per_frame=False,
):
self.default_actions = default_actions
self.has_object_actions = has_object_actions
self.image_per_frame = image_per_frame
self.image_dir = image_dir
self.counter = 0
default_interact_commands = {
"\x1b[C": dict(action="MoveRight", moveMagnitude=0.25),
"\x1b[D": dict(action="MoveLeft", moveMagnitude=0.25),
"\x1b[A": dict(action="MoveAhead", moveMagnitude=0.25),
"\x1b[B": dict(action="MoveBack", moveMagnitude=0.25),
"\x1b[1;2A": dict(action="LookUp"),
"\x1b[1;2B": dict(action="LookDown"),
"i": dict(action="LookUp"),
"k": dict(action="LookDown"),
"l": dict(action="RotateRight"),
"j": dict(action="RotateLeft"),
"\x1b[1;2C": dict(action="RotateRight"),
"\x1b[1;2D": dict(action="RotateLeft"),
}
action_set = {a.name for a in default_actions}
self.default_interact_commands = {
k: v
for (k, v) in default_interact_commands.items()
if v["action"] in action_set
}
def interact(
self,
controller,
semantic_segmentation_frame=False,
instance_segmentation_frame=False,
depth_frame=False,
color_frame=False,
metadata=False,
):
if not sys.stdout.isatty():
raise RuntimeError("controller.interact() must be run from a terminal")
default_interact_commands = self.default_interact_commands
self._interact_commands = default_interact_commands.copy()
command_message = u"Enter a Command: Move \u2190\u2191\u2192\u2193, Rotate/Look Shift + \u2190\u2191\u2192\u2193, Quit 'q' or Ctrl-C"
print(command_message)
for a in self.next_interact_command():
new_commands = {}
command_counter = dict(counter=1)
def add_command(cc, action, **args):
if cc["counter"] < 15:
com = dict(action=action)
com.update(args)
new_commands[str(cc["counter"])] = com
cc["counter"] += 1
event = controller.step(a)
visible_objects = []
InteractiveControllerPrompt.write_image(
event,
self.image_dir,
"_{}".format(self.counter),
image_per_frame=self.image_per_frame,
semantic_segmentation_frame=semantic_segmentation_frame,
instance_segmentation_frame=instance_segmentation_frame,
color_frame=color_frame,
depth_frame=depth_frame,
metadata=metadata,
)
self.counter += 1
if self.has_object_actions:
for o in event.metadata["objects"]:
if o["visible"]:
visible_objects.append(o["objectId"])
if o["openable"]:
if o["isOpen"]:
add_command(
command_counter,
"CloseObject",
objectId=o["objectId"],
)
else:
add_command(
command_counter,
"OpenObject",
objectId=o["objectId"],
)
if o["toggleable"]:
add_command(
command_counter,
"ToggleObjectOff",
objectId=o["objectId"],
)
if len(event.metadata["inventoryObjects"]) > 0:
inventoryObjectId = event.metadata["inventoryObjects"][0][
"objectId"
]
if (
o["receptacle"]
and (not o["openable"] or o["isOpen"])
and inventoryObjectId != o["objectId"]
):
add_command(
command_counter,
"PutObject",
objectId=inventoryObjectId,
receptacleObjectId=o["objectId"],
)
add_command(
command_counter, "MoveHandAhead", moveMagnitude=0.1
)
add_command(
command_counter, "MoveHandBack", moveMagnitude=0.1
)
add_command(
command_counter, "MoveHandRight", moveMagnitude=0.1
)
add_command(
command_counter, "MoveHandLeft", moveMagnitude=0.1
)
add_command(
command_counter, "MoveHandUp", moveMagnitude=0.1
)
add_command(
command_counter, "MoveHandDown", moveMagnitude=0.1
)
add_command(command_counter, "DropHandObject")
elif o["pickupable"]:
add_command(
command_counter, "PickupObject", objectId=o["objectId"]
)
self._interact_commands = default_interact_commands.copy()
self._interact_commands.update(new_commands)
print("Position: {}".format(event.metadata["agent"]["position"]))
print(command_message)
print("Visible Objects:\n" + "\n".join(sorted(visible_objects)))
skip_keys = ["action", "objectId"]
for k in sorted(new_commands.keys()):
v = new_commands[k]
command_info = [k + ")", v["action"]]
if "objectId" in v:
command_info.append(v["objectId"])
for ak, av in v.items():
if ak in skip_keys:
continue
command_info.append("%s: %s" % (ak, av))
print(" ".join(command_info))
def next_interact_command(self):
current_buffer = ""
while True:
commands = self._interact_commands
current_buffer += get_term_character()
if current_buffer == "q" or current_buffer == "\x03":
break
if current_buffer in commands:
yield commands[current_buffer]
current_buffer = ""
else:
match = False
for k, v in commands.items():
if k.startswith(current_buffer):
match = True
break
if not match:
current_buffer = ""
@classmethod
def write_image(
cls,
event,
image_dir,
suffix,
image_per_frame=False,
semantic_segmentation_frame=False,
instance_segmentation_frame=False,
depth_frame=False,
color_frame=False,
metadata=False,
):
def save_image(name, image, flip_br=False):
# TODO try to use PIL which did not work with RGBA
# image.save(
# name
# )
import cv2
img = image
if flip_br:
img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
cv2.imwrite("{}.png".format(name), img)
def array_to_image(arr, mode=None):
return arr
def json_write(name, obj):
with open("{}.json".format(name), "w") as outfile:
json.dump(obj, outfile, indent=4, sort_keys=True)
frame_writes = [
(
"color",
color_frame,
lambda event: event.frame,
array_to_image,
lambda x, y: save_image(x, y, flip_br=True),
),
(
"instance_segmentation",
instance_segmentation_frame,
lambda event: event.instance_segmentation_frame,
array_to_image,
save_image,
),
(
"class_segmentation",
semantic_segmentation_frame,
lambda event: event.semantic_segmentation_frame,
array_to_image,
save_image,
),
(
"depth",
depth_frame,
lambda event: event.depth_frame,
lambda data: array_to_image(
(255.0 / data.max() * (data - data.min())).astype(np.uint8)
),
save_image,
),
(
"depth_raw",
depth_frame,
lambda event: event.depth_frame,
lambda x: x,
lambda name, x: np.save(
name.strip(".png").strip("./")
if image_dir == "."
else name.strip(".png"),
x.astype(np.float32),
),
),
(
"metadata",
metadata,
lambda event: event.metadata,
lambda x: x,
json_write,
),
]
for frame_filename, condition, frame_func, transform, save in frame_writes:
frame = frame_func(event)
if frame is not None and condition:
frame = transform(frame)
image_name = os.path.join(
image_dir,
"{}{}".format(
frame_filename, "{}".format(suffix) if image_per_frame else ""
),
)
print("Image {}, {}".format(image_name, image_dir))
save(image_name, frame)
elif condition:
print("No frame '{}' present, call initialize with the right parameters".format(frame_filename))
| ai2thor-main | ai2thor/interact.py |
"""
A video controller for ai2thor
Basic example:
from ai2thor.controller import VideoController
with VideoController() as vc:
vc.play(vc.MoveAhead())
vc.wait(5)
vc.play(vc.MoveAhead())
vc.export_video('thor.mp4')
Known issues:
- Multi agent rotations don't work (since TeleportFull breaks when passing in an AgentID)
"""
from ai2thor.controller import Controller
import cv2
import os
from PIL import Image
import math
from math import erf, sqrt
class VideoController(Controller):
def __init__(
self,
cam_rot=dict(x=85, y=225, z=0),
cam_pos=dict(x=-1.25, y=7.0, z=-1.0),
cam_fov=60,
**controller_kwargs,
):
self.saved_frames = []
self.ceiling_off = False
self.initial_cam_rot = cam_rot.copy()
self.initial_cam_pos = cam_pos.copy()
self.initial_cam_fov = cam_fov
super().__init__(continuous=True, **controller_kwargs)
self.step(
action="AddThirdPartyCamera",
rotation=self.initial_cam_rot,
position=self.initial_cam_pos,
fieldOfView=self.initial_cam_fov,
)
def reset(self, scene=None, **init_params):
"""Changes the scene and adds a new third party camera to the initial position."""
super().reset(scene, **init_params)
return self.step(
action="AddThirdPartyCamera",
rotation=self.initial_cam_rot,
position=self.initial_cam_pos,
fieldOfView=self.initial_cam_fov,
)
def play(self, *action_generators):
"""Apply multiple actions at the same time (e.g., move multiple agents,
and pan the camera around the scene).
Examples
vc.play(vc.MoveAhead())
vc.wait(60)
vc.play(vc.MoveAhead(), vc.OrbitCameraAnimation(0, 0, 0))"""
# action_generators should be a list of generators (e.g., moveAhead(<Params>))
# this does many transformations at the same time
while True:
# execute next actions if available
next_actions = [next(generator, False) for generator in action_generators]
# add the frame to the saved frames after all actions execute
self.saved_frames.append(self.last_event.third_party_camera_frames[0])
# remove actions with finished iterators
next_actions = [action for action in next_actions if action != False]
if not next_actions:
# exit after all generators have finished
break
def _wait(self, frames=60):
"""Yields a generator used in self.wait()"""
for _ in range(frames):
yield self.step(action="Pass")
def wait(self, frames=60):
"""Do absolutely nothing to the agent. Keep the current frame still, as is.
Params
- frames (int)=60: The duration of the do nothing action.
Note: videos are typically 30fps.
Example: vc.wait(60)"""
self.play(self._wait(frames))
def ToggleCeiling(self):
"""Hides the ceiling. This method is greatly preferred over calling
step(action='ToggleMapView') directly, since it allows for automatic
ceiling toggles in the future. (e.g., if the camera is above the
height of the room, toggle off the ceiling, and vice versa."""
self.ceiling_off = not self.ceiling_off
return self.step(action="ToggleMapView")
def _cdf(self, x, std_dev=0.5, mean=0.0):
"""Cumulative distribution function"""
return (1.0 + erf((x - mean) / sqrt(2.0 * std_dev ** 2))) / 2.0
def _linear_to_smooth(self, curr_frame, total_frames, std_dev=0.5, min_val=3):
# start at -3 STD on a normal gaussian, go to 3 STD on gaussian
# curr frame should be 1 indexed, and end with total_frames
assert min_val > 0, "Min val should be > 0"
if curr_frame == total_frames:
# removes drifting
return 1
return self._cdf(
-min_val + 2 * min_val * (curr_frame / total_frames), std_dev=std_dev
)
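# Worked example of the easing above (values are approximate): with std_dev=1
# and min_val=3, the frame progress is mapped through a Gaussian CDF sampled on
# [-3, 3], so for total_frames=4 the per-frame fractions are roughly
# 0.07, 0.50, 0.93, 1.0 instead of the linear 0.25, 0.50, 0.75, 1.0 --
# motion eases in and out rather than moving at a constant rate, and the final
# frame is pinned to exactly 1 to avoid drift.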
def _move(self, actionName, moveMagnitude, frames, smoothAnimation, agentId=None):
"""Yields a generator full of move commands to move the agent incrementally.
Used as a general move command for MoveAhead, MoveRight, MoveLeft, MoveBack."""
last_moveMag = 0
for i in range(frames):
# smoothAnimation = False => linear animation
if smoothAnimation:
next_moveMag = (
self._linear_to_smooth(i + 1, frames, std_dev=1) * moveMagnitude
)
if agentId is None:
yield self.step(
action=actionName, moveMagnitude=next_moveMag - last_moveMag
)
else:
yield self.step(
action=actionName,
moveMagnitude=next_moveMag - last_moveMag,
agentId=agentId,
)
last_moveMag = next_moveMag
else:
if agentId is None:
yield self.step(
action=actionName, moveMagnitude=moveMagnitude / frames
)
else:
yield self.step(
action=actionName,
moveMagnitude=moveMagnitude / frames,
agentId=agentId,
)
def _rotate(self, direction, rotateDegrees, frames, smoothAnimation, agentId=None):
"""Yields a generator full of step(action='TeleportFull') commands to rotate the agent incrementally."""
if agentId is not None:
raise ValueError("rotations do not yet work with multiple agents")
# make it work for left and right rotations
direction = direction.lower()
assert direction == "left" or direction == "right"
if direction == "left":
rotateDegrees *= -1
# get the initial rotation
y0 = self.last_event.metadata["agent"]["rotation"]["y"]
for i in range(frames):
# keep the position the same
p = self.last_event.metadata["agent"]["position"]
if smoothAnimation:
yield self.step(
action="TeleportFull",
rotation=y0
+ rotateDegrees * self._linear_to_smooth(i + 1, frames, std_dev=1),
**p,
)
else:
yield self.step(
action="TeleportFull",
rotation=y0 + rotateDegrees * ((i + 1) / frames),
**p,
)
def MoveAhead(self, moveMagnitude=1, frames=60, smoothAnimation=True, agentId=None):
return self._move(
"MoveAhead", moveMagnitude, frames, smoothAnimation, agentId=agentId
)
def MoveBack(self, moveMagnitude=1, frames=60, smoothAnimation=True, agentId=None):
return self._move(
"MoveBack", moveMagnitude, frames, smoothAnimation, agentId=agentId
)
def MoveLeft(self, moveMagnitude=1, frames=60, smoothAnimation=True, agentId=None):
return self._move(
"MoveLeft", moveMagnitude, frames, smoothAnimation, agentId=agentId
)
def MoveRight(self, moveMagnitude=1, frames=60, smoothAnimation=True, agentId=None):
return self._move(
"MoveRight", moveMagnitude, frames, smoothAnimation, agentId=agentId
)
def RotateRight(
self, rotateDegrees=90, frames=60, smoothAnimation=True, agentId=None
):
# do incremental teleporting
return self._rotate(
"right", rotateDegrees, frames, smoothAnimation, agentId=agentId
)
def RotateLeft(
self, rotateDegrees=90, frames=60, smoothAnimation=True, agentId=None
):
# do incremental teleporting
return self._rotate(
"left", rotateDegrees, frames, smoothAnimation, agentId=agentId
)
def OrbitCameraAnimation(
self,
centerX,
centerZ,
posY,
dx=6,
dz=6,
xAngle=55,
frames=60,
orbit_degrees_per_frame=0.5,
):
"""Orbits the camera around the scene.
Example: https://www.youtube.com/watch?v=KcELPpdN770&feature=youtu.be&t=14"""
degrees = frames * orbit_degrees_per_frame
rot0 = self.last_event.metadata["thirdPartyCameras"][0]["rotation"][
"y"
] # starting angle
for frame in range(frames):
yAngle = rot0 + degrees * (frame + 1) / frames
yield self.step(
action="UpdateThirdPartyCamera",
thirdPartyCameraId=0,
rotation={"x": xAngle, "y": yAngle, "z": 0},
position={
"x": centerX - dx * math.sin(math.radians(yAngle)),
"y": posY,
"z": centerZ - dz * math.cos(math.radians(yAngle)),
},
)
def RelativeCameraAnimation(self, px=0, py=0, pz=0, rx=0, ry=0, rz=0, frames=60):
"""Linear interpolation between the current camera position and rotation
and the final camera position, given by deltas to the current values.
Params
- px (int)=0: x offset from the current camera position.
- py (int)=0: y offset from the current camera position.
- pz (int)=0: z offset from the current camera position.
- rx (int)=0: x offset from the current camera rotation.
- ry (int)=0: y offset from the current camera rotation.
- rz (int)=0: z offset from the current camera rotation.
- frames (int)=60: The duration of the animation.
Note: videos are typically 30fps."""
for _ in range(frames):
cam = self.last_event.metadata["thirdPartyCameras"][0]
pos, rot = cam["position"], cam["rotation"]
yield self.step(
action="UpdateThirdPartyCamera",
thirdPartyCameraId=0,
rotation={
"x": rot["x"] + rx / frames,
"y": rot["y"] + ry / frames,
"z": rot["z"] + rz / frames,
},
position={
"x": pos["x"] + px / frames,
"y": pos["y"] + py / frames,
"z": pos["z"] + pz / frames,
},
)
def AbsoluteCameraAnimation(
self,
px,
py,
pz,
rx,
ry,
rz,
frames=60,
smartSkybox=True,
FOVstart=None,
FOVend=None,
visibleAgents=True,
):
cam = self.last_event.metadata["thirdPartyCameras"][0]
p0, r0 = cam["position"], cam["rotation"]
if smartSkybox:
# toggles on and off (to give the same final result) to find the height of the ceiling
event0 = self.step(action="ToggleMapView")
event1 = self.step(action="ToggleMapView")
if event0.metadata["actionReturn"]:
maxY = event0.metadata["actionReturn"]["y"]
else:
maxY = event1.metadata["actionReturn"]["y"]
for i in range(1, frames + 1):
if self.ceiling_off and maxY > p0["y"] + (py - p0["y"]) / frames * i:
# turn ceiling on
self.ToggleCeiling()
kwargs = {
"action": "UpdateThirdPartyCamera",
"thirdPartyCameraId": 0,
"rotation": {
"x": r0["x"] + (rx - r0["x"]) / frames * i,
"y": r0["y"] + (ry - r0["y"]) / frames * i,
"z": r0["z"] + (rz - r0["z"]) / frames * i,
},
"position": {
"x": p0["x"] + (px - p0["x"]) / frames * i,
"y": p0["y"] + (py - p0["y"]) / frames * i,
"z": p0["z"] + (pz - p0["z"]) / frames * i,
},
}
# enables linear animation changes to the camera FOV
if FOVstart is not None and FOVend is not None:
kwargs["fieldOfView"] = FOVstart + (FOVend - FOVstart) / frames * i
if not (smartSkybox and maxY > p0["y"] + (py - p0["y"]) / frames * i):
kwargs["skyboxColor"] = "black"
yield self.step(**kwargs)
def LookUp(self):
raise NotImplementedError()
def LookDown(self):
raise NotImplementedError()
def Stand(self):
"""Note: have not found an easy way to move the agent in-between
stand and crouch."""
raise NotImplementedError()
def Crouch(self):
"""Note: have not found an easy way to move the agent in-between
stand and crouch."""
raise NotImplementedError()
def export_video(self, path):
"""Merges all the saved frames into a .mp4 video and saves it to `path`"""
if self.saved_frames:
path = path if path[-4:] == ".mp4" else path + ".mp4"
if os.path.exists(path):
os.remove(path)
video = cv2.VideoWriter(
path,
cv2.VideoWriter_fourcc(*"DIVX"),
30,
(self.saved_frames[0].shape[1], self.saved_frames[0].shape[0]),
)
for frame in self.saved_frames:
# assumes that the frames are RGB images. CV2 uses BGR.
video.write(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
cv2.destroyAllWindows()
video.release()
def export_frames(self, path, file_type=".png"):
"""Exports all of the presently frames to the `path` directory.
The frames are numbered in sequential order (starting with 0)."""
for i in range(len(self.saved_frames)):
p = os.path.join(path, f"{i}{file_type}")
if os.path.exists(p):
os.remove(p)
Image.fromarray(self.saved_frames[i]).save(p)
def merge_video(self, other_video_path):
"""Concatenates the frames of `other_video_path` to the presently
generated video within this class."""
vidcap = cv2.VideoCapture(other_video_path)
success, image = vidcap.read()
i = 0
while success:
if i % 2 == 0:
rgb = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
self.saved_frames.append(rgb)
success, image = vidcap.read()
i += 1
| ai2thor-main | ai2thor/video_controller.py |
from abc import abstractmethod, ABC
from collections import defaultdict
from functools import reduce
from operator import itemgetter
import ai2thor.controller
import copy
import functools
import inspect
import itertools
import json
import logging
import numpy as np
import operator
import os
import platform
import random
import sys
import time
from typing import Dict, List, Set, Tuple, Union, TYPE_CHECKING, Any, Optional
BENCHMARKING_S3_BUCKET = "ai2-thor-benchmark"
FORMAT = "%(asctime)s %(message)s"
logger = logging.getLogger(__name__)
class BenchmarkConfig:
def __init__(
self,
benchmarker_class_names: List[str],
init_params: Dict[str, Any],
name: str = "",
config_name: str = "",
scenes: Optional[List[str]] = None,
procedural_houses: Optional[List[Dict[str, Any]]] = None,
action_group_sample_count: int = 1,
experiment_sample_count: int = 100,
filter_object_types: Union[None, str, List[str]] = None,
random_teleport_before_action_group: bool = False,
include_per_action_breakdown: bool = False,
only_transformed_aggregates: bool = True,
verbose: bool = False,
output_file: str = "benchmark.json",
):
if verbose:
logger.setLevel(logging.DEBUG)
else:
logger.setLevel(logging.WARNING)
subclasses = [cls.__name__ for cls in Benchmarker.__subclasses__()]
subclasses_set = set(subclasses)
if len(subclasses) != len(subclasses_set):
duplicated = [x for x in subclasses_set if subclasses.count(x) > 1]
logger.warning(f"Duplicated subclasses of Benchmarker '{duplicated}'")
benchmarker_map = {cls.__name__: cls for cls in Benchmarker.__subclasses__()}
self.benchmarkers = []
for benchmarker_class in benchmarker_class_names:
if benchmarker_class in benchmarker_map:
self.benchmarkers.append(
benchmarker_map[benchmarker_class](only_transformed_aggregates)
)
else:
raise ValueError(
f"Invalid benchmarker class '{benchmarker_class}'. Available {str.join(benchmarker_map.keys(), ', ')}"
)
self.init_params = init_params
self.action_sample_count = action_group_sample_count
self.experiment_sample_count = experiment_sample_count
self.scenes = scenes
self.procedural_houses = procedural_houses
self.output_file = output_file
self.include_per_action_breakdown = include_per_action_breakdown
self.only_transformed_aggregates = only_transformed_aggregates
self.verbose = verbose
self.filter_object_types = filter_object_types
self.teleport_random_before_actions = random_teleport_before_action_group
self.name = name
self.config_name = config_name
class Benchmarker(ABC):
def __init__(self, only_transformed_key=False):
self.only_transformed_key = only_transformed_key
@abstractmethod
def aggregate_key(self):
raise NotImplementedError
@abstractmethod
def transformed_key(self):
raise NotImplementedError
@abstractmethod
def name(self):
raise NotImplementedError
@abstractmethod
def benchmark(self, env, action_config, add_key_values={}):
raise NotImplementedError
def aggregate_by(self, records, dimensions, transform=True, aggregate_out_key=None):
if not isinstance(dimensions, list):
dimensions = [dimensions]
if aggregate_out_key is None:
aggregate_out_key = self.aggregate_key()
grouper = itemgetter(*dimensions)
transform = (lambda x: self.transform_aggregate(x)) if transform else (lambda x: x)
groups = itertools.groupby(sorted(records, key=grouper), grouper)
groups = [(dimension, list(slice)) for dimension, slice in groups]
aggregated_groups = {
dimension: transform(
{
"count": len(slice),
aggregate_out_key: np.sum([v[self.aggregate_key()] for v in slice])
/ len(slice),
}
)
for dimension, slice in groups
}
return aggregated_groups
@abstractmethod
def transform_aggregate(self, report):
raise NotImplementedError
class SimsPerSecondBenchmarker(Benchmarker):
def __init__(self, only_transformed_key=False):
self.only_transformed_key = only_transformed_key
pass
def aggregate_key(self):
return "average_frametime"
def transformed_key(self):
return "average_sims_per_second"
def name(self):
return "Simulations Per Second"
def benchmark(self, env, action_config, add_key_values={}):
start = time.perf_counter()
env.step(dict(action=action_config["action"], **action_config["args"]))
end = time.perf_counter()
frame_time = end - start
record = {
"action": action_config["action"],
"count": 1,
self.aggregate_key(): frame_time,
}
record = {**record, **add_key_values}
return record
def transform_aggregate(self, report):
report[self.transformed_key()] = 1 / report[self.aggregate_key()]
if self.only_transformed_key:
del report[self.aggregate_key()]
return report
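# Hedged usage sketch (not part of the original module): illustrates how
# SimsPerSecondBenchmarker records flow through Benchmarker.aggregate_by.
# The literal records below are made up purely for illustration.
#
#     bench = SimsPerSecondBenchmarker()
#     key = bench.aggregate_key()  # "average_frametime"
#     records = [
#         {"action": "RotateRight", "count": 1, key: 0.02, "scene": "FloorPlan1"},
#         {"action": "MoveAhead", "count": 1, key: 0.04, "scene": "FloorPlan1"},
#     ]
#     by_scene = bench.aggregate_by(records, "scene")
#     # by_scene["FloorPlan1"] holds the record count, the mean frame time, and the
#     # "average_sims_per_second" value added by transform_aggregate.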
class UnityActionBenchmarkRunner(BenchmarkConfig):
def __clean_action(self, action: Union[str, Dict[str, Any]]):
print(f"__clean_action: {action}")
if isinstance(action, str):
return {"action": action, "args": {}}
if "args" not in action:
            action_name = action.pop("action", None)
            return {"action": action_name, "args": {**action}}
else:
return {**action, "args": action.get("args", {})}
def __get_complete_action_dict(self, action_group):
group_copy = copy.deepcopy(action_group)
actions_copy = group_copy["actions"]
for a in actions_copy:
print(f"Action {a}")
print(f"groupc {group_copy}")
group_copy["actions"] = [
self.__clean_action(a)
for a in actions_copy
# if (not isinstance(a, Dict)) or "action" not in a
]
print(f"groupc {group_copy}")
if "sample_count" not in group_copy:
group_copy["sample_count"] = self.action_sample_count
default_selector = lambda x: random.choice(x)
if isinstance(group_copy["selector"], str):
if group_copy["selector"] == "random":
group_copy["selector"] = default_selector
elif group_copy["selector"] == "sequence":
it = iter(group_copy["actions"])
group_copy["selector"] = lambda x: next(it)
group_copy["sample_count"] = len(group_copy["actions"])
# TODO: potentially add more selectors
if group_copy["selector"] is None:
group_copy["selector"] = default_selector
# TODO: Arg selector for potentially sending different values as arguments
return group_copy
def __create_procedural_house(self, env, procedural_house):
if procedural_house:
logger.info("Creating procedural house: ".format(procedural_house["id"]))
evt = env.step(action="CreateHouse", house=procedural_house)
return evt.metadata["lastActionSuccess"]
else:
return False
def __set_object_filter(self, env):
if self.filter_object_types is not None and self.filter_object_types != "":
if self.filter_object_types == "*":
logger.info("-- Filter All Objects From Metadata")
env.step(action="SetObjectFilter", objectIds=[])
            elif isinstance(self.filter_object_types, str):
evt = env.step(
action="SetObjectFilterForType",
objectTypes=[self.filter_object_types],
)
logger.info(
"Filter action, Success: {}, error: {}".format(
evt.metadata["lastActionSuccess"], evt.metadata["errorMessage"]
)
)
            elif isinstance(self.filter_object_types, list):
types = self.filter_object_types
evt = env.step(action="SetObjectFilterForType", objectTypes=types)
logger.info(
"Filter action, Success: {}, error: {}".format(
evt.metadata["lastActionSuccess"], evt.metadata["errorMessage"]
)
)
def __teleport_to_random_reachable(self, env, house=None):
evt = env.step(action="GetReachablePositions")
if (
house is not None
and "metadata" in house
and not evt.metadata["lastActionSuccess"]
):
if "agent" in house["metadata"]:
logger.info("Teleporting")
evt = env.step(
dict(action="TeleportFull", forceAction=True, **house["metadata"])
)
if (
not evt.metadata["lastActionSuccess"]
or evt.metadata["actionReturn"] is None
or len(evt.metadata["actionReturn"]) == 0
) and house is not None:
# teleport within scene for reachable positions to work
def centroid(poly):
n = len(poly)
total = reduce(
lambda acc, e: {
"x": acc["x"] + e["x"],
"y": acc["y"] + e["y"],
"z": acc["z"] + e["z"],
},
poly,
{"x": 0, "y": 2, "z": 0},
)
return {"x": total["x"] / n, "y": total["y"] / n, "z": total["z"] / n}
pos = {"x": 0, "y": 2, "z": 0}
if house["rooms"] and len(house["rooms"]) > 0:
poly = house["rooms"][0]["floorPolygon"]
pos = centroid(poly)
evt = env.step(
dict(
action="TeleportFull",
x=pos["x"],
y=pos["y"],
z=pos["z"],
rotation=dict(x=0, y=0, z=0),
horizon=0.0,
standing=True,
forceAction=True,
)
)
logger.info("--Teleport, " + " err: " + evt.metadata["errorMessage"])
evt = env.step(action="GetReachablePositions")
logger.info(
"-- GetReachablePositions success: {}, message: {}".format(
evt.metadata["lastActionSuccess"], evt.metadata["errorMessage"]
)
)
if len(evt.metadata["actionReturn"]):
reachable_pos = evt.metadata["actionReturn"]
pos = random.choice(reachable_pos)
rot = random.choice([0, 90, 180, 270])
evt = env.step(
dict(
action="TeleportFull",
x=pos["x"],
y=pos["y"],
z=pos["z"],
rotation=dict(x=0, y=rot, z=0),
horizon=0.0,
standing=True,
)
)
def benchmark(self, action_map={}):
print(action_map)
action_map = {
k: self.__get_complete_action_dict(group) for k, group in action_map.items()
}
print(action_map)
args = self.init_params
controller_params = copy.deepcopy(args)
if "server_class" in args:
controller_params["server_type"] = controller_params[
"server_class"
].server_type
del controller_params["server_class"]
env = ai2thor.controller.Controller(**args)
if self.scenes:
if isinstance(self.scenes, list):
scene_list = self.scenes
            elif isinstance(self.scenes, str):
scene_list = [self.scenes]
else:
scene_list = []
scene_list = [(scene, None) for scene in scene_list]
if self.procedural_houses:
scene_list = scene_list + [
("Procedural", house) for house in self.procedural_houses
]
experiment_list = [
[
[
(scene, procedural_house, benchmarker, i)
for benchmarker in self.benchmarkers
]
for i in range(self.experiment_sample_count)
]
for (scene, procedural_house) in scene_list
]
experiment_list = functools.reduce(
operator.iconcat,
functools.reduce(operator.iconcat, experiment_list, []),
[],
)
# Filter out procedural scenes without houses to benchmark, maybe change in the future if we want to benchmark Procedural by itself
experiment_list = [
(scene, procedural_house, x, y)
for (scene, procedural_house, x, y) in experiment_list
if not (scene == "Procedural" and procedural_house is None)
]
benchmark_map = {
"title": self.name,
"config": self.config_name,
"benchmarks": defaultdict(lambda: defaultdict(lambda: {})),
"controller_params": controller_params,
"benchmark_params": {
"platform": platform.system(),
"arch": env._build.platform.__name__,
"commit_id": env._build.commit_id,
"filter_object_types": self.filter_object_types,
"action_sample_number": self.action_sample_count,
},
}
total_average_ft = 0
scene_count = 0
records = []
print("Exp list")
print(experiment_list)
for scene, procedural_house, benchmarker, experiment_index in experiment_list:
logger.info("Loading scene '{}'.".format(scene))
env.reset(scene)
house = procedural_house
house_id = ""
if house is not None:
success = self.__create_procedural_house(env, house)
if not success:
                    logger.warning(
f"Procedural house creation failed for house {house['id']}"
)
continue
house_id = house["id"]
logger.info(f"------ Scene: '{scene}', house={house_id}")
self.__set_object_filter(env)
for action_group_name, action_group in action_map.items():
self.__teleport_to_random_reachable(env, house)
for i in range(action_group["sample_count"]):
# print(f"Selector {action_group['selector']} action_g? {action_group} actions {action_group['actions']}")
action_config = action_group["selector"](action_group["actions"])
print(f"---- benchmarking action: {action_config}")
record = benchmarker.benchmark(
env,
action_config,
{
"action_group": action_group_name,
"house": house_id,
"scene": scene,
"experiment_index": experiment_index,
"benchmarker": benchmarker.name(),
},
)
records.append(record)
env.stop()
by_benchmarker = {}
by_scene = {}
by_action = {}
by_action_group = {}
for benchmarker in self.benchmarkers:
by_benchmarker.update(benchmarker.aggregate_by(records, "benchmarker"))
by_scene.update(
benchmarker.aggregate_by(records, ["scene", "house", "benchmarker"])
)
if self.include_per_action_breakdown:
by_action.update(
benchmarker.aggregate_by(
records, ["scene", "house", "benchmarker", "action"]
)
)
by_action_group.update(
benchmarker.aggregate_by(
records, ["scene", "house", "benchmarker", "action_group"]
)
)
house_or_scene = lambda scene, house: scene if scene != "Procedural" else house
benchmark_map["action_groups"] = {
group_name: [a["action"] for a in group["actions"]]
for group_name, group in action_map.items()
}
for (
scene,
house_id,
benchmarker_name,
action_group,
), aggregate in by_action_group.items():
benchmark_map["benchmarks"][benchmarker_name][
house_or_scene(scene, house_id)
][action_group] = aggregate
for (
scene,
house_id,
benchmarker_name,
action_name,
), aggregate in by_action.items():
benchmark_map["benchmarks"][benchmarker_name][
house_or_scene(scene, house_id)
][action_name] = aggregate
for (scene, house_id, benchmarker_name), aggregate in by_scene.items():
benchmark_map["benchmarks"][benchmarker_name][
house_or_scene(scene, house_id)
]["scene"] = aggregate
if scene == "Procedural":
benchmark_map["benchmarks"][benchmarker_name][
house_or_scene(scene, house_id)
]["scene"]["procedural"] = True
for benchmarker_name, aggregate in by_benchmarker.items():
benchmark_map["benchmarks"][benchmarker_name]["global"] = aggregate
if scene_count:
benchmark_map["average_framerate_seconds"] = total_average_ft / scene_count
return benchmark_map
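# Hedged usage sketch (not part of the original module): one way the runner above
# might be driven. The init_params, scene name and action map are illustrative
# assumptions rather than values taken from this repository.
#
#     runner = UnityActionBenchmarkRunner(
#         benchmarker_class_names=["SimsPerSecondBenchmarker"],
#         init_params={"width": 300, "height": 300},
#         scenes=["FloorPlan1_physics"],
#         experiment_sample_count=2,
#     )
#     report = runner.benchmark(
#         action_map={
#             "move": {"actions": ["MoveAhead", "MoveBack"], "selector": "random"},
#             "rotate": {"actions": ["RotateRight"], "selector": "sequence"},
#         }
#     )
#     print(report["benchmarks"]["Simulations Per Second"]["global"])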
| ai2thor-main | ai2thor/benchmarking.py |
import json
import math
import copy
def vector_distance(v0, v1):
dx = v0["x"] - v1["x"]
dy = v0["y"] - v1["y"]
dz = v0["z"] - v1["z"]
return math.sqrt(dx * dx + dy * dy + dz * dz)
def path_distance(path):
distance = 0
for i in range(0, len(path) - 1):
distance += vector_distance(path[i], path[i + 1])
return distance
def compute_spl(episodes_with_golden):
"""
    Computes batch SPL from a list of episodes.
    :param episodes_with_golden: sequence of episode objects; each episode should include
        the keys 'path' (the path to evaluate) and 'shortest_path' (the shortest path as
        returned by 'get_shortest_path_to_object'), both as sequences with values of the
        form dict(x=float, y=float, z=float), plus 'success', a boolean/int that is 0 for
        a failed path and 1 for a successful one.
    :return: a float representing the mean SPL over the episodes
"""
N = len(episodes_with_golden)
eval_sum = 0.0
for i, episode in enumerate(episodes_with_golden):
path = episode["path"]
shortest_path = episode["shortest_path"]
eval_sum += compute_single_spl(path, shortest_path, episode["success"])
return eval_sum / N
def compute_single_spl(path, shortest_path, successful_path):
"""
    Computes SPL for a single path
    :param path: Sequence of dict(x=float, y=float, z=float) representing the path to evaluate
    :param shortest_path: Sequence of dict(x=float, y=float, z=float) representing the shortest path
    :param successful_path: boolean indicating if the path was successful, 0 for a failed path or 1 for a successful one
    :return: a float representing the SPL for this path
"""
    Si = 1 if successful_path in (True, 1) else 0
li = path_distance(shortest_path)
pi = path_distance(path)
if max(pi, li) > 0:
pl_ratio = li / max(pi, li)
else:
pl_ratio = 1.0
spl = Si * pl_ratio
return spl
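# Hedged worked example (not part of the original module): a 2 m golden path
# against a 4 m executed path on a successful episode gives
# SPL = 1 * 2 / max(4, 2) = 0.5.
#
#     golden = [dict(x=0.0, y=0.0, z=0.0), dict(x=0.0, y=0.0, z=2.0)]
#     executed = [
#         dict(x=0.0, y=0.0, z=0.0),
#         dict(x=2.0, y=0.0, z=0.0),
#         dict(x=2.0, y=0.0, z=2.0),
#     ]
#     assert compute_single_spl(executed, golden, True) == 0.5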
def get_shortest_path_to_object(
controller, object_id, initial_position, initial_rotation=None
):
"""
Computes the shortest path to an object from an initial position using a controller
:param controller: agent controller
:param object_id: string with id of the object
    :param initial_position: dict(x=float, y=float, z=float) with the desired initial position
    :param initial_rotation: dict(x=float, y=float, z=float) representing rotation around axes or None
    :return: sequence of dict(x=float, y=float, z=float) corner points along the shortest path
"""
args = dict(
action="GetShortestPath",
objectId=object_id,
position=initial_position,
)
if initial_rotation is not None:
args["rotation"] = initial_rotation
event = controller.step(args)
if event.metadata["lastActionSuccess"]:
return event.metadata["actionReturn"]["corners"]
else:
raise ValueError(
"Unable to find shortest path for objectId '{}'".format(object_id)
)
def get_shortest_path_to_object_type(
controller, object_type, initial_position, initial_rotation=None, allowed_error=None
):
"""
Computes the shortest path to an object from an initial position using a controller
:param controller: agent controller
:param object_type: string that represents the type of the object
    :param initial_position: dict(x=float, y=float, z=float) with the desired initial position
:param initial_rotation: dict(x=float, y=float, z=float) representing rotation around axes or None
:param allowed_error: a floating point number describing the total amount of error allowed in specifying
the start and end point in the shortest path computation. This number should be non-zero to allow for
floating point issues and can be made larger to make this method more robust to edge cases where
ai2thor "thinks" no path exists (this comes at the cost of some added noise to the start/end positions of the
path). Passing `None` to this argument (the default) will result in THOR choosing it to be some small value,
note that this value will not be exactly 0 to be robust to floating point inaccuracies.
"""
kwargs = dict(
action="GetShortestPath",
objectType=object_type,
position=initial_position,
)
if initial_rotation is not None:
kwargs["rotation"] = initial_rotation
if allowed_error is not None:
kwargs["allowedError"] = allowed_error
event = controller.step(kwargs)
if event.metadata["lastActionSuccess"]:
return event.metadata["actionReturn"]["corners"]
else:
raise ValueError(
"Unable to find shortest path for object type '{}' due to error '{}'.".format(
object_type, event.metadata["errorMessage"]
)
)
def get_shortest_path_to_point(
controller, initial_position, target_position, allowed_error=None
):
"""
Computes the shortest path to a point from an initial position using an agent controller
:param controller: agent controller
    :param initial_position: dict(x=float, y=float, z=float) with the desired initial position
    :param target_position: dict(x=float, y=float, z=float) representing target position
    :param allowed_error: See documentation of the `get_shortest_path_to_object_type` method.
    :return: sequence of dict(x=float, y=float, z=float) corner points along the shortest path
"""
kwargs = dict(
action="GetShortestPathToPoint",
position=initial_position,
x=target_position["x"],
y=target_position["y"],
z=target_position["z"],
)
if allowed_error is not None:
kwargs["allowedError"] = allowed_error
event = controller.step(kwargs)
if event.metadata["lastActionSuccess"]:
return event.metadata["actionReturn"]["corners"]
else:
raise ValueError(
"Unable to find shortest path to point '{}' due to error '{}'.".format(
target_position, event.metadata["errorMessage"]
)
)
def get_episodes_with_shortest_paths(controller, episodes):
"""
Computes shortest path for an episode sequence
:param controller: agent controller
    :param episodes: sequence of episode objects with required fields:
        'scene' string with the name of the scene to reset to
        'target_object_id' (or 'target_object_type') string identifying the object to look for
        'initial_position' dict(x=float, y=float, z=float) of the starting position
        'initial_rotation' dict(x=float, y=float, z=float) representing rotation
            around axes
    :return: a deep copy of the episodes with a 'shortest_path' field added to each episode
"""
episodes_with_golden = copy.deepcopy(episodes)
for _, episode in enumerate(episodes_with_golden):
controller.reset(episode["scene"])
try:
if "target_object_id" in episode:
episode["shortest_path"] = get_shortest_path_to_object(
controller,
episode["target_object_id"],
{
"x": episode["initial_position"]["x"],
"y": episode["initial_position"]["y"],
"z": episode["initial_position"]["z"],
},
{
"x": episode["initial_rotation"]["x"],
"y": episode["initial_rotation"]["y"],
"z": episode["initial_rotation"]["z"],
},
)
else:
episode["shortest_path"] = get_shortest_path_to_object_type(
controller,
episode["target_object_type"],
{
"x": episode["initial_position"]["x"],
"y": episode["initial_position"]["y"],
"z": episode["initial_position"]["z"],
},
{
"x": episode["initial_rotation"]["x"],
"y": episode["initial_rotation"]["y"],
"z": episode["initial_rotation"]["z"],
},
)
except ValueError:
raise ValueError(
"Unable to find shortest path for objectId '{}' in episode '{}'".format(
episode["target_object_id"],
json.dumps(episode, sort_keys=True, indent=4),
)
)
return episodes_with_golden
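# Hedged usage sketch (not part of the original module): end-to-end SPL evaluation
# combining the helpers above. The controller construction and all concrete values
# are assumptions for illustration only; 'path' must be the recorded agent trajectory.
#
#     import ai2thor.controller
#
#     controller = ai2thor.controller.Controller(scene="FloorPlan28_physics")
#     episodes = [
#         {
#             "scene": "FloorPlan28_physics",
#             "target_object_type": "Mug",
#             "initial_position": {"x": -0.75, "y": 0.98, "z": -0.25},
#             "initial_rotation": {"x": 0.0, "y": 180.0, "z": 0.0},
#             "path": [{"x": -0.75, "y": 0.98, "z": -0.25}, {"x": -0.75, "y": 0.98, "z": -1.25}],
#             "success": 1,
#         }
#     ]
#     episodes_with_golden = get_episodes_with_shortest_paths(controller, episodes)
#     print(compute_spl(episodes_with_golden))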
| ai2thor-main | ai2thor/util/metrics.py |
import numpy as np
REAL_2_SIM_TRANSFORM = np.array(
[[1.00854301, -0.0111386, 0.51920809], [0.00316833, 0.97336625, -1.15532594]]
)
def transform_real_2_sim(real_position):
"""
Transforms a position from the 'real' coordinate system to the 'sim' coordinate system.
:param real_position: dictionary with 'x', 'y' and 'z' keys to floating point values
:return: position in sim space as dictionary with 'x', 'y' and 'z' keys to floating point values
"""
real_pos = np.array([real_position["x"], real_position["y"], 1])
sim_pos_np = np.dot(REAL_2_SIM_TRANSFORM, real_pos)
sim_pos = {"x": sim_pos_np[0], "y": 0.9010001, "z": sim_pos_np[1]}
return sim_pos
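# Hedged usage sketch (not part of the original module): the transform is a 2x3
# affine map applied to the real (x, y) pair; the resulting sim height is pinned
# to 0.9010001 and the real 'z' value is ignored. The input point is arbitrary.
#
#     sim_pos = transform_real_2_sim({"x": 0.5, "y": 1.2, "z": 0.0})
#     # sim_pos is a dict with 'x', fixed 'y' and 'z' keys in sim coordinates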
| ai2thor-main | ai2thor/util/transforms.py |
import numpy as np
import math
class TrialMetric(object):
def init_trials(self, num_trials, metadata):
...
def update_with_trial(self, trial_index, metadata):
...
class ObjectPositionVarianceAverage(TrialMetric):
"""
Metric that computes the average of the variance of all objects in a scene across multiple runs.
"""
def __init__(self):
self.trials = []
self.object_ids = []
def init_trials(self, num_trials, metadata):
objects = metadata["objects"]
self.object_ids = sorted([o["objectId"] for o in objects])
num_objects = len(self.object_ids)
self.trials = np.empty([num_trials, num_objects, 3])
def update_with_trial(self, trial_index, metadata):
objects = metadata["objects"]
object_pos_map = {
o["objectId"]: vec_to_np_array(o["position"]) for o in objects
}
for object_index in range(len(self.object_ids)):
object_id = self.object_ids[object_index]
self.trials[trial_index][object_index] = object_pos_map[object_id]
def compute(self, n=None):
return np.mean(np.var(self.trials[:n], axis=0))
def vec_to_np_array(vec):
return np.array([vec["x"], vec["y"], vec["z"]])
def trial_runner(controller, number, metric, compute_running_metric=False):
"""
Generator that wraps metric capture from controller metadata for a number of trials
:param controller: ai2thor controller
:param number: int number of trials to collect metrics from
:param metric: TrialMetric the metric to use
:param compute_running_metric: bool whether or not to compute the metric after every trial
:return: tuple(controller, float) with the controller and the metric after every trial
"""
metric.init_trials(number, controller.last_event.metadata)
for trial_index in range(number):
try:
yield controller, metric.compute(
n=trial_index
) if compute_running_metric else math.nan
metric.update_with_trial(trial_index, controller.last_event.metadata)
controller.reset()
except RuntimeError as e:
print(
e,
"Last action status: {}".format(
controller.last_event.meatadata["actionSuccess"]
),
controller.last_event.meatadata["errorMessage"],
)
yield controller, metric.compute()
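# Hedged usage sketch (not part of the original module): collects the average
# object-position variance over a few resets of the same scene. The controller
# construction, trial count and action are illustrative assumptions.
#
#     import ai2thor.controller
#
#     controller = ai2thor.controller.Controller(scene="FloorPlan28_physics")
#     metric = ObjectPositionVarianceAverage()
#     for _, running_value in trial_runner(controller, 5, metric, compute_running_metric=True):
#         controller.step(action="MoveAhead")
#     print("average object position variance:", metric.compute())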
| ai2thor-main | ai2thor/util/trials.py |
import re
def scene_names_key_func(scene_name):
"""
Key function for sorting scenes with the naming convention that was used
"""
m = re.search("FloorPlan[_]?([a-zA-Z\-]*)([0-9]+)_?([0-9]+)?.*$", scene_name)
last_val = m.group(3) if m.group(3) is not None else -1
return m.group(1), int(m.group(2)), int(last_val)
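# Hedged usage sketch (not part of the original module): sorts a mixed list of
# scene names using the key above; the example names are arbitrary.
#
#     names = ["FloorPlan301_physics", "FloorPlan28_physics", "FloorPlan_Train7_1"]
#     print(sorted(names, key=scene_names_key_func))
#     # -> ['FloorPlan28_physics', 'FloorPlan301_physics', 'FloorPlan_Train7_1']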
| ai2thor-main | ai2thor/util/scene_utils.py |
from pprint import pprint
from unityparser import UnityDocument
# Requires unity parser, run:
# pip install unityparser
def updateNavMeshParamsForScene(scene_file_name):
pprint("Updating file '{}'...".format(scene_file_name))
doc = UnityDocument.load_yaml(scene_file_name)
for entry in doc.entries:
if entry.__class__.__name__ == "NavMeshSettings":
# print(entry.__class__.__name__)
buildSettings = getattr(entry, "m_BuildSettings", None)
# pprint(buildSettings)
buildSettings["agentRadius"] = "0.2"
buildSettings["agentHeight"] = "1.8"
buildSettings["agentClimb"] = "0.5"
buildSettings["manualCellSize"] = "1"
buildSettings["cellSize"] = "0.03"
doc.dump_yaml()
def GetRoboSceneNames(
last_index, last_subIndex, nameTemplate, prefix_path="unity/Assets/Scenes"
):
return [
"{}/FloorPlan_{}{}_{}.unity".format(prefix_path, nameTemplate, i, j)
for i in range(1, last_index + 1)
for j in range(1, last_subIndex + 1)
]
def GetSceneNames(
start_index, last_index, nameTemplate="", prefix_path="unity/Assets/Scenes"
):
return [
"{}/FloorPlan{}{}_physics.unity".format(prefix_path, i, nameTemplate)
for i in range(start_index, last_index + 1)
]
def main():
# testSceneNames = GetRoboSceneNames(3, 5, "Val")
# valSceneNames = GetRoboSceneNames(2, 2, "test-dev", "unity/Assets/Private/Scenes")
# trainSceneNames = GetRoboSceneNames(12, 5, "Train")
# allScenes = testSceneNames + trainSceneNames
# allScenes = valSceneNames
iThorScenes = (
GetSceneNames(1, 30)
+ GetSceneNames(201, 230)
+ GetSceneNames(301, 330)
+ GetSceneNames(401, 430)
+ GetSceneNames(501, 530)
)
allScenes = iThorScenes
# print(allScenes)
for scene_file_name in allScenes:
updateNavMeshParamsForScene(scene_file_name)
# print(scene_file_name)
if __name__ == "__main__":
main()
# Exceptions:
# Scene FloorPlan_Train7_1
# Train_11_3 unmade bed
# Val2_3 unmade bed
| ai2thor-main | ai2thor/util/scene_yaml_edit.py |
import os
import random
import time
def makedirs(directory):
os.makedirs(directory, exist_ok=True)
def atomic_write(path, data):
tmp_path = "-".join([path, str(time.time()), str(random.random())])
mode = "w"
if type(data) is bytes:
mode = "wb"
with open(tmp_path, mode) as f:
f.write(data)
os.rename(tmp_path, path)
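# Hedged usage sketch (not part of the original module): writes a small JSON blob
# atomically; the temporary file is renamed over the target only after the write
# completes. The path below is an arbitrary assumption.
#
#     makedirs("/tmp/ai2thor-example")
#     atomic_write("/tmp/ai2thor-example/state.json", '{"ok": true}')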
| ai2thor-main | ai2thor/util/__init__.py |
import os
import numpy as np
dir_path = os.path.dirname(os.path.realpath(__file__))
def generate_noise_indices(img_size):
img_size = int(img_size)
default_size = 300
corners = np.load(os.path.join(dir_path, "depth_noise.npy"), allow_pickle=True)
indices = []
for j, corner in enumerate(corners):
height_indices = np.array([], dtype=np.int32)
width_indices = np.array([], dtype=np.int32)
if img_size != default_size:
idx = 0 if j <= 1 else len(corner) - 1
width = corner[idx]
height = len(corner)
w_ratio = width / default_size
h_ratio = height / default_size
width = int(round(w_ratio * img_size))
height = int(round(h_ratio * img_size))
m = (height - 0) / (0 - width)
b = height
t = np.array([], dtype=np.int32)
for y in range(height):
x = (y - b) / m
t = np.append(t, int(round(x)))
t = np.flip(t, 0) if j > 1 else t
corner = t
for i, c in enumerate(corner):
offset = 0
i_offset = 0
if j % 2 != 0:
offset = img_size - c
if j > 1:
i_offset = img_size - len(corner)
x = np.repeat(i_offset + i, c)
height_indices = np.concatenate((height_indices, x))
y = np.array(range(offset, offset + c))
width_indices = np.concatenate((width_indices, y))
indices.append(
(
np.array(height_indices, dtype=np.int32),
np.array(width_indices, dtype=np.int32),
)
)
return indices
def apply_real_noise(depth_arr, size, indices=None):
"""
Applies noise to depth image to look more similar to the real depth camera
:param depth_arr: numpy square 2D array representing the depth
:param size: square size of array
:param indices: cached indices where noise is going to be applied, if None they get calculated
here based on the image size.
:return:
"""
if indices is None:
indices = generate_noise_indices(size)
for index_arr in indices:
depth_arr[index_arr] = 0.0
return depth_arr
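# Hedged usage sketch (not part of the original module): zeroes out the corner
# regions of a synthetic depth frame so it better matches the real depth camera,
# reusing cached indices when many frames of the same size are processed.
#
#     depth = np.ones((300, 300), dtype=np.float32)
#     noisy = apply_real_noise(depth, 300)
#     cached = generate_noise_indices(300)
#     noisy2 = apply_real_noise(np.ones((300, 300), dtype=np.float32), 300, indices=cached)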
| ai2thor-main | ai2thor/util/depth.py |
import os
from platform import system
if system() == "Windows":
class fcntl:
LOCK_UN = 0
LOCK_SH = 0
LOCK_NB = 0
LOCK_EX = 0
@staticmethod
def fcntl(fd, op, arg=0):
return 0
@staticmethod
def ioctl(fd, op, arg=0, mutable_flag=True):
return 0 if mutable_flag else ""
@staticmethod
def flock(fd, op):
return
@staticmethod
def lockf(fd, operation, length=0, start=0, whence=0):
return
else:
import fcntl
class Lock:
def __init__(self, target, mode):
self._lock_file_path = target + ".lock"
self._lock_file = os.open(self._lock_file_path, os.O_RDWR | os.O_CREAT)
self.mode = mode
def lock(self):
fcntl.lockf(self._lock_file, self.mode)
def unlock(self):
fcntl.lockf(self._lock_file, fcntl.LOCK_UN)
os.close(self._lock_file)
def unlink(self):
os.unlink(self._lock_file_path)
def __enter__(self):
self.lock()
def __exit__(self, exception_type, exception_value, exception_traceback):
self.unlock()
class LockEx(Lock):
def __init__(self, target, blocking=True):
mode = fcntl.LOCK_EX
if not blocking:
mode |= fcntl.LOCK_NB
super().__init__(target, mode)
class LockSh(Lock):
def __init__(self, target):
super().__init__(target, fcntl.LOCK_SH)
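# Hedged usage sketch (not part of the original module): guards a critical section
# with an exclusive lock; a sibling "<target>.lock" file is created automatically.
# The path below is an arbitrary assumption.
#
#     with LockEx("/tmp/ai2thor-example-build.zip"):
#         pass  # only one process at a time executes this block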
| ai2thor-main | ai2thor/util/lock.py |
import os
from pathlib import Path
TEST_SCENE = "FloorPlan28_physics"
TESTS_DIR = os.path.abspath(os.path.dirname(Path(__file__)))
TESTS_DATA_DIR = os.path.join(TESTS_DIR, "data")
| ai2thor-main | ai2thor/tests/constants.py |
ai2thor-main | ai2thor/tests/__init__.py |
|
import ai2thor.fifo_server
import pytest
import numpy as np
import msgpack
from ai2thor.tests.test_event import metadata_simple
from ai2thor.tests.fifo_client import FifoClient
from io import BytesIO
import copy
def test_multi_agent_train():
s = ai2thor.fifo_server.FifoServer(width=300, height=300)
s.send(dict(action="RotateRight"))
c = FifoClient(s.server_pipe_path, s.client_pipe_path)
msg = c.recv()
c.send(
ai2thor.fifo_server.FieldType.METADATA,
generate_multi_agent_metadata_payload(metadata_simple, s.sequence_id),
)
c.send_eom()
event = s.receive()
assert len(event.events) == 2
assert event.events[1].metadata == metadata_simple
def test_train_numpy_action():
s = ai2thor.fifo_server.FifoServer(width=300, height=300)
s.send(
dict(
action="Teleport",
rotation=dict(y=np.array([24])[0]),
moveMagnitude=np.array([55.5])[0],
myCustomArray=np.array([1, 2]),
)
)
c = FifoClient(s.server_pipe_path, s.client_pipe_path)
msg = c.recv()
assert msg == {
"action": "Teleport",
"rotation": {"y": 24},
"sequenceId": 1,
"moveMagnitude": 55.5,
"myCustomArray": [1, 2],
}
def generate_metadata_payload(metadata, sequence_id):
return msgpack.dumps(dict(agents=[metadata], sequenceId=sequence_id))
def generate_multi_agent_metadata_payload(metadata, sequence_id):
return msgpack.dumps(
dict(agents=[metadata, metadata], activeAgentId=1, sequenceId=sequence_id)
)
def test_simple():
s = ai2thor.fifo_server.FifoServer(width=300, height=300)
s.send(dict(action="RotateRight"))
c = FifoClient(s.server_pipe_path, s.client_pipe_path)
msg = c.recv()
assert msg == dict(action="RotateRight", sequenceId=s.sequence_id)
c.send(
ai2thor.fifo_server.FieldType.METADATA,
generate_metadata_payload(metadata_simple, s.sequence_id),
)
c.send_eom()
event = s.receive()
assert event.metadata == metadata_simple
def test_sequence_id_mismatch():
s = ai2thor.fifo_server.FifoServer(width=300, height=300)
s.send(dict(action="RotateRight"))
c = FifoClient(s.server_pipe_path, s.client_pipe_path)
msg = c.recv()
assert msg == dict(action="RotateRight", sequenceId=s.sequence_id)
c.send(
ai2thor.fifo_server.FieldType.METADATA,
generate_metadata_payload(metadata_simple, s.sequence_id + 1),
)
c.send_eom()
exception_caught = False
try:
event = s.receive()
except ValueError as e:
exception_caught = True
assert exception_caught
| ai2thor-main | ai2thor/tests/test_fifo_server.py |
import ai2thor.fifo_server
import struct
import json
class FifoClient:
def __init__(self, server_pipe_path, client_pipe_path):
self.server_pipe = None
self.client_pipe = None
self.server_pipe_path = server_pipe_path
self.client_pipe_path = client_pipe_path
def send(self, field_type, body):
if self.server_pipe is None:
self.server_pipe = open(self.server_pipe_path, "wb")
header = struct.pack(
ai2thor.fifo_server.FifoServer.header_format, field_type, len(body)
)
self.server_pipe.write(header + body)
def send_eom(self):
header = struct.pack(
ai2thor.fifo_server.FifoServer.header_format,
ai2thor.fifo_server.FieldType.END_OF_MESSAGE,
0,
)
self.server_pipe.write(header)
self.server_pipe.flush()
def recv(self):
# print("trying to receive")
if self.client_pipe is None:
self.client_pipe = open(self.client_pipe_path, "rb")
# print("trying to read")
j = None
while True:
header = self.client_pipe.read(
ai2thor.fifo_server.FifoServer.header_size
) # field_type + length
if len(header) == 0:
print("Read 0 - server closed")
break
# print("got header %s" % header)
# print("header length %s" % len(header))
field_type_int, field_length = struct.unpack(
ai2thor.fifo_server.FifoServer.header_format, header
)
field_type = ai2thor.fifo_server.FifoServer.field_types[field_type_int]
if field_length > 0: # EOM has length == 0
body = self.client_pipe.read(field_length)
if field_type is ai2thor.fifo_server.FieldType.ACTION:
j = json.loads(body)
elif field_type is ai2thor.fifo_server.FieldType.END_OF_MESSAGE:
# #print("got eom")
break
else:
raise Exception("invalid field %s" % field_type)
return j
| ai2thor-main | ai2thor/tests/fifo_client.py |
import os
from ai2thor.server import Event, DepthFormat
import warnings
import json
import numpy as np
import pytest
from ai2thor.tests.constants import TESTS_DATA_DIR, TEST_SCENE
metadata_complex = {
"agent": {
"bounds3D": [],
"cameraHorizon": 0.0,
"distance": 0.0,
"isopen": False,
"name": "agent",
"objectId": "",
"objectType": "",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {"x": -0.75, "y": 0.9799995422363281, "z": -0.25},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 180.0, "z": 0.0},
"visible": False,
},
"thirdPartyCameras": [],
"agentId": 0,
"collided": False,
"collidedObjects": [],
"colorBounds": [
{"bounds": [0, 0, 119, 299], "color": [138, 235, 7]},
{"bounds": [116, 0, 299, 99], "color": [127, 29, 203]},
{"bounds": [116, 0, 137, 64], "color": [237, 189, 33]},
{"bounds": [131, 0, 143, 55], "color": [97, 134, 44]},
{"bounds": [139, 0, 169, 71], "color": [193, 44, 202]},
{"bounds": [141, 0, 146, 30], "color": [96, 50, 133]},
{"bounds": [133, 0, 299, 85], "color": [89, 77, 61]},
{"bounds": [143, 0, 297, 34], "color": [214, 15, 78]},
{"bounds": [116, 0, 299, 99], "color": [115, 3, 101]},
{"bounds": [258, 12, 299, 84], "color": [96, 140, 59]},
{"bounds": [116, 14, 120, 28], "color": [162, 203, 153]},
{"bounds": [195, 15, 255, 85], "color": [108, 174, 95]},
{"bounds": [172, 17, 194, 71], "color": [168, 12, 250]},
{"bounds": [121, 18, 132, 30], "color": [246, 16, 151]},
{"bounds": [124, 29, 133, 40], "color": [116, 220, 170]},
{"bounds": [117, 31, 125, 63], "color": [115, 78, 181]},
{"bounds": [258, 35, 289, 43], "color": [241, 134, 252]},
{"bounds": [126, 39, 135, 49], "color": [114, 84, 146]},
{"bounds": [119, 44, 299, 203], "color": [73, 64, 168]},
{"bounds": [128, 48, 136, 57], "color": [185, 225, 171]},
{"bounds": [223, 54, 233, 69], "color": [14, 97, 183]},
{"bounds": [135, 56, 138, 74], "color": [96, 48, 36]},
{"bounds": [126, 69, 127, 69], "color": [66, 225, 0]},
{"bounds": [172, 72, 194, 84], "color": [191, 227, 85]},
{"bounds": [117, 77, 121, 78], "color": [92, 3, 233]},
{"bounds": [116, 81, 170, 96], "color": [177, 60, 44]},
{"bounds": [284, 91, 299, 123], "color": [110, 132, 248]},
{"bounds": [192, 92, 197, 97], "color": [36, 91, 74]},
{"bounds": [218, 92, 224, 97], "color": [56, 51, 197]},
{"bounds": [118, 93, 133, 101], "color": [72, 78, 219]},
{"bounds": [205, 93, 212, 99], "color": [178, 18, 13]},
{"bounds": [116, 95, 117, 106], "color": [60, 103, 95]},
{"bounds": [184, 95, 203, 106], "color": [42, 54, 156]},
{"bounds": [210, 95, 217, 103], "color": [214, 68, 168]},
{"bounds": [121, 96, 124, 118], "color": [226, 66, 148]},
{"bounds": [160, 96, 177, 101], "color": [135, 13, 200]},
{"bounds": [233, 96, 237, 103], "color": [127, 73, 96]},
{"bounds": [246, 96, 253, 102], "color": [18, 240, 113]},
{"bounds": [118, 97, 133, 120], "color": [110, 250, 103]},
{"bounds": [149, 97, 154, 105], "color": [44, 186, 193]},
{"bounds": [201, 97, 209, 115], "color": [118, 102, 24]},
{"bounds": [213, 97, 221, 115], "color": [182, 114, 149]},
{"bounds": [224, 97, 231, 103], "color": [20, 107, 195]},
{"bounds": [233, 97, 242, 110], "color": [219, 74, 174]},
{"bounds": [120, 98, 125, 106], "color": [202, 218, 132]},
{"bounds": [133, 98, 138, 110], "color": [122, 156, 16]},
{"bounds": [245, 99, 253, 112], "color": [216, 69, 22]},
{"bounds": [186, 107, 189, 108], "color": [34, 152, 164]},
{"bounds": [257, 107, 260, 108], "color": [48, 42, 241]},
{"bounds": [167, 108, 219, 187], "color": [92, 62, 94]},
{"bounds": [145, 109, 152, 113], "color": [17, 67, 188]},
{"bounds": [55, 134, 160, 298], "color": [216, 148, 75]},
{"bounds": [115, 136, 146, 203], "color": [181, 237, 187]},
{"bounds": [109, 189, 113, 210], "color": [104, 199, 254]},
{"bounds": [103, 195, 108, 219], "color": [238, 221, 39]},
{"bounds": [92, 201, 102, 239], "color": [36, 61, 25]},
{"bounds": [117, 202, 137, 208], "color": [143, 211, 227]},
{"bounds": [55, 202, 299, 299], "color": [55, 223, 207]},
{"bounds": [107, 210, 112, 218], "color": [135, 101, 149]},
{"bounds": [73, 213, 91, 269], "color": [1, 209, 145]},
{"bounds": [46, 234, 72, 299], "color": [215, 152, 183]},
{"bounds": [11, 263, 45, 299], "color": [45, 75, 161]},
],
"colors": [
{"color": [58, 205, 56], "name": "Bowl|-00.16|+01.50|-01.45"},
{"color": [209, 182, 193], "name": "Bowl"},
{"color": [226, 29, 217], "name": "Container|-00.16|+00.93|-02.94"},
{"color": [14, 114, 120], "name": "Container"},
{"color": [219, 14, 164], "name": "Ladel1.001"},
{"color": [138, 235, 7], "name": "Fridge|-00.22|00.00|-00.83"},
{"color": [91, 156, 207], "name": "Fridge1"},
{"color": [181, 237, 187], "name": "Cabinet|-00.35|+01.89|-03.29"},
{"color": [210, 149, 89], "name": "Drawer"},
{"color": [237, 189, 33], "name": "StoveBase1"},
{"color": [216, 148, 75], "name": "Cube.090"},
{"color": [117, 7, 236], "name": "Toaster|-00.16|+00.93|-01.45"},
{"color": [55, 33, 114], "name": "Toaster1"},
{"color": [215, 152, 183], "name": "Cabinet|-00.34|+01.89|-01.29"},
{"color": [44, 186, 193], "name": "Mug|-00.78|+00.93|-03.85"},
{"color": [8, 94, 186], "name": "CoffeeCup1"},
{"color": [122, 156, 16], "name": "Bottle5.001"},
{"color": [116, 220, 170], "name": "StoveKnob|-00.62|+00.90|-01.98"},
{"color": [106, 252, 95], "name": "StoveKnob2_Range4"},
{"color": [41, 198, 116], "name": "Spatula2.001"},
{"color": [119, 173, 49], "name": "Torus"},
{"color": [168, 12, 250], "name": "Cabinet|-01.01|+00.39|-03.37"},
{"color": [61, 44, 125], "name": "Microwave|-00.17|+01.49|-02.06"},
{"color": [54, 96, 202], "name": "Microwave4"},
{"color": [240, 130, 222], "name": "StoveBurner|-00.23|+00.93|-01.85"},
{"color": [156, 249, 101], "name": "GasStoveTop_Range1"},
{"color": [72, 78, 219], "name": "Sphere.010"},
{"color": [255, 102, 152], "name": "StoveBurner|-00.42|+00.93|-02.26"},
{"color": [248, 115, 142], "name": "StoveBurner|-00.23|+00.93|-02.26"},
{"color": [135, 13, 200], "name": "TurkeyPan.005"},
{"color": [45, 75, 161], "name": "Cabinet|-00.34|+02.11|-01.27"},
{"color": [92, 3, 233], "name": "Spatula1.002"},
{"color": [96, 50, 133], "name": "Towl1 (1)"},
{"color": [143, 211, 227], "name": "Cylinder.028"},
{"color": [108, 174, 95], "name": "Cube.085"},
{"color": [34, 152, 164], "name": "SugarJar.005"},
{"color": [96, 48, 36], "name": "Cabinet|-00.48|+00.78|-02.74"},
{"color": [131, 29, 70], "name": "Ladel3.001"},
{"color": [55, 223, 207], "name": "Ceiling"},
{"color": [102, 49, 87], "name": "Knife|-00.14|+01.12|-02.75"},
{"color": [211, 157, 122], "name": "Knife1"},
{"color": [177, 60, 44], "name": "Cube.100"},
{"color": [114, 84, 146], "name": "StoveKnob|-00.62|+00.90|-02.13"},
{"color": [60, 103, 95], "name": "Bottle3.001"},
{"color": [186, 206, 150], "name": "PaperRoll1"},
{"color": [164, 253, 150], "name": "Sphere.012"},
{"color": [77, 4, 136], "name": "Spatula1.001"},
{"color": [135, 101, 149], "name": "TurkeyPan.006"},
{"color": [237, 39, 71], "name": "Decals.002"},
{"color": [226, 66, 148], "name": "Bottle4.001"},
{"color": [246, 16, 151], "name": "StoveKnob|-00.62|+00.90|-01.83"},
{"color": [36, 91, 74], "name": "Tomato|-01.32|+00.93|-03.53"},
{"color": [119, 189, 121], "name": "Tomato"},
{"color": [193, 44, 202], "name": "Cabinet|-00.63|+00.39|-03.01"},
{"color": [118, 102, 24], "name": "SugarJar.004"},
{"color": [92, 62, 94], "name": "VenetianFrame"},
{"color": [14, 97, 183], "name": "Towl1"},
{"color": [87, 195, 41], "name": "GarbageCan|-00.36|00.00|-00.21"},
{"color": [225, 40, 55], "name": "GarbageCan"},
{"color": [110, 132, 248], "name": "CoffeeMachine|-02.65|+00.93|-03.57"},
{"color": [147, 71, 238], "name": "CoffeeMachine2"},
{"color": [214, 15, 78], "name": "Floor"},
{"color": [73, 64, 168], "name": "Room"},
{"color": [89, 77, 61], "name": "Cube.086"},
{"color": [127, 29, 203], "name": "Cube.082"},
{"color": [97, 134, 44], "name": "StoveTopDoor1"},
{"color": [140, 135, 166], "name": "Fork|-00.48|+00.81|-02.74"},
{"color": [54, 200, 25], "name": "Fork1"},
{"color": [185, 225, 171], "name": "StoveKnob|-00.62|+00.90|-02.29"},
{"color": [91, 94, 10], "name": "Egg|-00.21|+00.27|-00.83"},
{"color": [240, 75, 163], "name": "Egg"},
{"color": [162, 203, 153], "name": "Mug|-00.53|+00.93|-01.58"},
{"color": [1, 209, 145], "name": "Cabinet|-00.34|+02.11|-01.63"},
{"color": [104, 199, 254], "name": "Cabinet|-00.33|+01.89|-03.24"},
{"color": [29, 84, 249], "name": "Spoon|-00.50|+00.78|-01.45"},
{"color": [235, 57, 90], "name": "Spoon"},
{"color": [115, 3, 101], "name": "Decals.003"},
{"color": [71, 3, 53], "name": "Sphere.008"},
{"color": [191, 227, 85], "name": "Cabinet|-01.15|+00.78|-03.50"},
{"color": [238, 221, 39], "name": "Cabinet|-00.33|+01.89|-02.51"},
{"color": [18, 240, 113], "name": "SugarFill.006"},
{"color": [36, 61, 25], "name": "Cabinet|-00.34|+02.11|-02.50"},
{"color": [214, 68, 168], "name": "Mug|-01.63|+00.92|-03.74"},
{"color": [17, 67, 188], "name": "Outlet (1)"},
{"color": [66, 225, 0], "name": "ButterKnife|-00.43|+00.93|-02.60"},
{"color": [135, 147, 55], "name": "butterKnife"},
{"color": [115, 78, 181], "name": "StoveTopGas"},
{"color": [182, 114, 149], "name": "SugarJar.001"},
{"color": [139, 56, 140], "name": "StoveBottomDoor1"},
{"color": [202, 218, 132], "name": "Cube.109"},
{"color": [178, 18, 13], "name": "Apple|-01.49|+00.93|-03.50"},
{"color": [159, 98, 144], "name": "Apple"},
{"color": [20, 107, 195], "name": "SugarFill.001"},
{"color": [193, 221, 101], "name": "Plate|-00.15|+01.49|-02.73"},
{"color": [188, 154, 128], "name": "Plate"},
{"color": [55, 176, 84], "name": "Cabinet|-00.63|+00.39|-01.61"},
{"color": [145, 107, 85], "name": "Cabinet|-00.34|+02.11|-00.39"},
{"color": [138, 185, 132], "name": "SugarJar.003"},
{"color": [202, 210, 177], "name": "Bottle2.001"},
{"color": [141, 139, 54], "name": "Cabinet|-00.63|+00.39|-02.51"},
{"color": [96, 140, 59], "name": "Chair|-02.35|00.00|-03.60"},
{"color": [166, 13, 176], "name": "Chair5"},
{"color": [199, 148, 125], "name": "Bottle1.001"},
{"color": [34, 126, 70], "name": "ladel2.001"},
{"color": [48, 42, 241], "name": "SugarJar.006"},
{"color": [127, 73, 96], "name": "SugarFill.004"},
{"color": [219, 74, 174], "name": "Sugar.001"},
{"color": [216, 69, 22], "name": "SugarJar.002"},
{"color": [31, 88, 95], "name": "StoveBurner|-00.42|+00.93|-01.85"},
{"color": [193, 143, 140], "name": "Outlet"},
{"color": [97, 114, 178], "name": "Sphere.001"},
{"color": [56, 51, 197], "name": "Potato|-01.63|+00.93|-03.48"},
{"color": [187, 142, 9], "name": "Potato"},
{"color": [42, 54, 156], "name": "Bread|-01.33|+00.93|-03.71"},
{"color": [18, 150, 252], "name": "Bread"},
{"color": [195, 218, 223], "name": "Cabinet|-00.50|+00.78|-01.45"},
{"color": [34, 130, 237], "name": "Pot|-00.47|+00.08|-02.74"},
{"color": [132, 237, 87], "name": "Pot1"},
{"color": [110, 250, 103], "name": "Bottles.001"},
{"color": [4, 93, 193], "name": "Lettuce|-00.33|+00.74|-00.69"},
{"color": [203, 156, 88], "name": "Lettuce1"},
{"color": [241, 134, 252], "name": "Baseboard.020"},
{"color": [127, 127, 189], "name": "Pan|-00.68|+00.08|-03.27"},
{"color": [246, 212, 161], "name": "Pan1"},
{"color": [207, 119, 70], "name": "Spatula3.001"},
],
"errorCode": "",
"errorMessage": "",
"inventoryObjects": [],
"lastAction": "RotateRight",
"lastActionSuccess": True,
"objects": [
{
"bounds3D": [
-2.5750010013580322,
0.8563164472579956,
-3.647000312805176,
-1.5749990940093994,
0.9563164710998535,
-3.3069992065429688,
],
"cameraHorizon": 0.0,
"distance": 3.6240997314453125,
"isopen": False,
"name": "Tabletop",
"objectId": "TableTop|-02.08|+00.94|-03.62",
"objectType": "TableTop",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -2.075000047683716,
"y": 0.9433164596557617,
"z": -3.622999906539917,
},
"receptacle": True,
"receptacleCount": 4,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 90.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.2521742284297943,
1.4949759244918823,
-2.831829071044922,
-0.05024271458387375,
1.5067294836044312,
-2.6298975944519043,
],
"cameraHorizon": 0.0,
"distance": 2.6035001277923584,
"isopen": False,
"name": "Plate",
"objectId": "Plate|-00.15|+01.49|-02.73",
"objectType": "Plate",
"openable": False,
"parentReceptacle": "Cabinet|-00.33|+01.89|-02.51",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.15120847523212433,
"y": 1.494760513305664,
"z": -2.730863332748413,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": -1.0245284101983998e-05, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.6563448309898376,
0.8580825328826904,
-2.015467643737793,
-0.576196014881134,
0.9382582902908325,
-1.9353333711624146,
],
"cameraHorizon": 0.0,
"distance": 1.7323315143585205,
"isopen": False,
"name": "StoveKnob2_Range2",
"objectId": "StoveKnob|-00.62|+00.90|-01.98",
"objectType": "StoveKnob",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.6176999807357788,
"y": 0.8996000289916992,
"z": -1.9753999710083008,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 315.0, "y": 89.97400665283203, "z": 180.03199768066406},
"visible": False,
},
{
"bounds3D": [
-1.3614451885223389,
0.9283196926116943,
-3.5663928985595703,
-1.2814817428588867,
0.9905622005462646,
-3.486574649810791,
],
"cameraHorizon": 0.0,
"distance": 3.3262617588043213,
"isopen": False,
"name": "Tomato",
"objectId": "Tomato|-01.32|+00.93|-03.53",
"objectType": "Tomato",
"openable": False,
"parentReceptacle": "",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -1.3221999406814575,
"y": 0.9303702116012573,
"z": -3.5262999534606934,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.7945087552070618,
0.07984550297260284,
-3.400216579437256,
-0.5677620768547058,
0.12984557449817657,
-3.1494078636169434,
],
"cameraHorizon": 0.0,
"distance": 3.1552624702453613,
"isopen": False,
"name": "Pan1",
"objectId": "Pan|-00.68|+00.08|-03.27",
"objectType": "Pan",
"openable": False,
"parentReceptacle": "Cabinet|-00.63|+00.39|-03.01",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.6810178160667419,
"y": 0.08484554290771484,
"z": -3.274834156036377,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {
"x": -6.1288878896448296e-06,
"y": 280.44842529296875,
"z": 1.398907170369057e-05,
},
"visible": False,
},
{
"bounds3D": [
-0.21095620095729828,
0.9303669929504395,
-2.992823362350464,
-0.09956331551074982,
1.1846275329589844,
-2.8814303874969482,
],
"cameraHorizon": 0.0,
"distance": 2.7526044845581055,
"isopen": False,
"name": "Container",
"objectId": "Container|-00.16|+00.93|-02.94",
"objectType": "Container",
"openable": False,
"parentReceptacle": "",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.15525996685028076,
"y": 0.9303703308105469,
"z": -2.937127113342285,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.40836191177368164,
0.14085793495178223,
-1.15748929977417,
0.030406057834625244,
1.7145073413848877,
-0.5005106925964355,
],
"cameraHorizon": 0.0,
"distance": 1.2551215887069702,
"isopen": False,
"name": "Fridge1",
"objectId": "Fridge|-00.22|00.00|-00.83",
"objectType": "Fridge",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [
{"objectId": "Egg|-00.21|+00.27|-00.83", "pivotId": 0},
{"objectId": "Lettuce|-00.33|+00.74|-00.69", "pivotId": 1},
],
"position": {
"x": -0.22300000488758087,
"y": -0.0010000000474974513,
"z": -0.8289999961853027,
},
"receptacle": True,
"receptacleCount": 6,
"receptacleObjectIds": [
"Egg|-00.21|+00.27|-00.83",
"Lettuce|-00.33|+00.74|-00.69",
],
"rotation": {"x": 0.0, "y": 270.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.6255507469177246,
0.8067288994789124,
-2.7551281452178955,
-0.38278937339782715,
0.826447069644928,
-2.7230093479156494,
],
"cameraHorizon": 0.0,
"distance": 2.509014844894409,
"isopen": False,
"name": "Fork1",
"objectId": "Fork|-00.48|+00.81|-02.74",
"objectType": "Fork",
"openable": False,
"parentReceptacle": "Cabinet|-00.48|+00.78|-02.74",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.48289254307746887,
"y": 0.8116353750228882,
"z": -2.7390687465667725,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": -1.0245284101983998e-05, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.553860604763031,
0.2711416482925415,
-0.4028606414794922,
-0.16013938188552856,
0.6648629307746887,
-0.00913935899734497,
],
"cameraHorizon": 0.0,
"distance": 1.0567800998687744,
"isopen": False,
"name": "GarbageCan",
"objectId": "GarbageCan|-00.36|00.00|-00.21",
"objectType": "GarbageCan",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.3569999933242798,
"y": -3.196139175543067e-08,
"z": -0.20600000023841858,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.8528260588645935,
0.9309259057044983,
-3.9095852375030518,
-0.714918315410614,
1.0337982177734375,
-3.7689216136932373,
],
"cameraHorizon": 0.0,
"distance": 3.6004319190979004,
"isopen": False,
"name": "CoffeeCup1",
"objectId": "Mug|-00.78|+00.93|-03.85",
"objectType": "Mug",
"openable": False,
"parentReceptacle": "",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.7749999761581421,
"y": 0.9301429986953735,
"z": -3.8499999046325684,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 50.4573860168457, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.19851021468639374,
0.9635931253433228,
-2.7536282539367676,
-0.09219704568386078,
1.3012911081314087,
-2.7334327697753906,
],
"cameraHorizon": 0.0,
"distance": 2.5751969814300537,
"isopen": False,
"name": "Knife1",
"objectId": "Knife|-00.14|+01.12|-02.75",
"objectType": "Knife",
"openable": False,
"parentReceptacle": "",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.14190000295639038,
"y": 1.117300033569336,
"z": -2.7486000061035156,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 10.637146949768066, "y": 274.3685607910156, "z": 270.0},
"visible": False,
},
{
"bounds3D": [
-0.5118284225463867,
0.9333651065826416,
-1.9365284442901611,
-0.3299715518951416,
0.9572690725326538,
-1.754671573638916,
],
"cameraHorizon": 0.0,
"distance": 1.629948377609253,
"isopen": False,
"name": "GasStoveTop_Range1",
"objectId": "StoveBurner|-00.42|+00.93|-01.85",
"objectType": "StoveBurner",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.42089998722076416,
"y": 0.9301429986953735,
"z": -1.8456000089645386,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.2595430612564087,
1.4952101707458496,
-1.5506460666656494,
-0.06338601559400558,
1.5541222095489502,
-1.3544890880584717,
],
"cameraHorizon": 0.0,
"distance": 1.4347065687179565,
"isopen": False,
"name": "Bowl",
"objectId": "Bowl|-00.16|+01.50|-01.45",
"objectType": "Bowl",
"openable": False,
"parentReceptacle": "Cabinet|-00.34|+01.89|-01.29",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.16146452724933624,
"y": 1.495596170425415,
"z": -1.45256769657135,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": -1.0245284101983998e-05, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.6566448211669922,
0.8584824800491333,
-2.3290677070617676,
-0.5764960050582886,
0.9386582374572754,
-2.2489333152770996,
],
"cameraHorizon": 0.0,
"distance": 2.0448336601257324,
"isopen": False,
"name": "StoveKnob2_Range4",
"objectId": "StoveKnob|-00.62|+00.90|-02.29",
"objectType": "StoveKnob",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.6179999709129333,
"y": 0.8999999761581421,
"z": -2.2890000343322754,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 315.0, "y": 89.97400665283203, "z": 180.03199768066406},
"visible": False,
},
{
"bounds3D": [
-0.2558910846710205,
0.9301429390907288,
-1.6137478351593018,
-0.0713789314031601,
1.1241569519042969,
-1.2920067310333252,
],
"cameraHorizon": 0.0,
"distance": 1.3391128778457642,
"isopen": False,
"name": "Toaster1",
"objectId": "Toaster|-00.16|+00.93|-01.45",
"objectType": "Toaster",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.1636350154876709,
"y": 0.9301429986953735,
"z": -1.4528772830963135,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-1.665656328201294,
0.924782931804657,
-3.7827463150024414,
-1.5564723014831543,
1.0276552438735962,
-3.6940536499023438,
],
"cameraHorizon": 0.0,
"distance": 3.596900701522827,
"isopen": False,
"name": "CoffeeCup1",
"objectId": "Mug|-01.63|+00.92|-03.74",
"objectType": "Mug",
"openable": False,
"parentReceptacle": "",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -1.625,
"y": 0.9240000247955322,
"z": -3.7383999824523926,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 180.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.29263991117477417,
1.5244276523590088,
-2.8414499759674072,
-0.16177701950073242,
2.2490928173065186,
-2.5138638019561768,
],
"cameraHorizon": 0.0,
"distance": 2.4750850200653076,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.33|+01.89|-02.51",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [{"objectId": "Plate|-00.15|+01.49|-02.73", "pivotId": 0}],
"position": {
"x": -0.3272084593772888,
"y": 1.8867602348327637,
"z": -2.5138635635375977,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": ["Plate|-00.15|+01.49|-02.73"],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.6222020983695984,
0.7248871326446533,
-1.614982008934021,
-0.6195090413093567,
0.8706167936325073,
-1.2865678071975708,
],
"cameraHorizon": 0.0,
"distance": 1.2426241636276245,
"isopen": False,
"name": "Drawer",
"objectId": "Cabinet|-00.50|+00.78|-01.45",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [{"objectId": "Spoon|-00.50|+00.78|-01.45", "pivotId": 0}],
"position": {
"x": -0.5008437633514404,
"y": 0.7795612812042236,
"z": -1.450774908065796,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": ["Spoon|-00.50|+00.78|-01.45"],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.5953136682510376,
0.09301626682281494,
-1.6149822473526,
-0.4644508361816406,
0.6846745014190674,
-1.3194092512130737,
],
"cameraHorizon": 0.0,
"distance": 1.4923365116119385,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.63|+00.39|-01.61",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.6298819780349731,
"y": 0.3888453245162964,
"z": -1.6149822473526,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-1.2881675958633423,
0.7248872518539429,
-3.3793442249298096,
-1.0107892751693726,
0.8706167936325073,
-3.376683473587036,
],
"cameraHorizon": 0.0,
"distance": 3.2784500122070312,
"isopen": False,
"name": "Drawer",
"objectId": "Cabinet|-01.15|+00.78|-03.50",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -1.1494783163070679,
"y": 0.7825552225112915,
"z": -3.4980251789093018,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-3.5819432735443115,
0.09301620721817017,
-3.3748939037323,
-0.9107897281646729,
0.6846743822097778,
-3.362663507461548,
],
"cameraHorizon": 0.0,
"distance": 3.185004711151123,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-01.01|+00.39|-03.37",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -1.010789155960083,
"y": 0.3888453245162964,
"z": -3.368778705596924,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.8397345542907715,
0.09301596879959106,
-3.5855960845947266,
-0.3782111406326294,
0.6846745014190674,
-3.124072551727295,
],
"cameraHorizon": 0.0,
"distance": 2.823883056640625,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.63|+00.39|-03.01",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [{"objectId": "Pan|-00.68|+00.08|-03.27", "pivotId": 0}],
"position": {
"x": -0.6330178380012512,
"y": 0.3888453245162964,
"z": -3.0088343620300293,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": ["Pan|-00.68|+00.08|-03.27"],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.5953132510185242,
0.09301614761352539,
-2.9192330837249756,
-0.4644504189491272,
0.6846743822097778,
-2.5138638019561768,
],
"cameraHorizon": 0.0,
"distance": 2.342855215072632,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.63|+00.39|-02.51",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [{"objectId": "Pot|-00.47|+00.08|-02.74", "pivotId": 0}],
"position": {
"x": -0.6298820972442627,
"y": 0.3888453245162964,
"z": -2.5138638019561768,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": ["Pot|-00.47|+00.08|-02.74"],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.6035346984863281,
0.7248871326446533,
-2.9642739295959473,
-0.6004599332809448,
0.8706167936325073,
-2.5138635635375977,
],
"cameraHorizon": 0.0,
"distance": 2.5116219520568848,
"isopen": False,
"name": "Drawer",
"objectId": "Cabinet|-00.48|+00.78|-02.74",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [{"objectId": "Fork|-00.48|+00.81|-02.74", "pivotId": 0}],
"position": {
"x": -0.4819878041744232,
"y": 0.777635395526886,
"z": -2.7390687465667725,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": ["Fork|-00.48|+00.81|-02.74"],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.6152604818344116,
1.5292630195617676,
-3.8681092262268066,
-0.15373694896697998,
2.2539286613464355,
-3.406585216522217,
],
"cameraHorizon": 0.0,
"distance": 3.2024600505828857,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.35|+01.89|-03.29",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.34654390811920166,
"y": 1.8915960788726807,
"z": -3.2933475971221924,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.3028959631919861,
1.5292634963989258,
-1.5821408033370972,
-0.17203307151794434,
2.2539284229278564,
-1.2865678071975708,
],
"cameraHorizon": 0.0,
"distance": 1.4407174587249756,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.34|+01.89|-01.29",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [{"objectId": "Bowl|-00.16|+01.50|-01.45", "pivotId": 0}],
"position": {
"x": -0.33746451139450073,
"y": 1.8915960788726807,
"z": -1.2865678071975708,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": ["Bowl|-00.16|+01.50|-01.45"],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.33359596133232117,
1.9445738792419434,
-2.497605323791504,
-0.20273306965827942,
2.275726795196533,
-2.12178373336792,
],
"cameraHorizon": 0.0,
"distance": 2.549344301223755,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.34|+02.11|-02.50",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.33746451139450073,
"y": 2.1101503372192383,
"z": -2.497605323791504,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.33359596133232117,
1.9445738792419434,
-2.0148353576660156,
-0.20273306965827942,
2.275726795196533,
-1.631803035736084,
],
"cameraHorizon": 0.0,
"distance": 1.8321586847305298,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.34|+02.11|-01.63",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.33746451139450073,
"y": 2.1101503372192383,
"z": -1.6318029165267944,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.334695965051651,
1.9445741176605225,
-1.2722522020339966,
-0.20383307337760925,
2.275726556777954,
-0.909758448600769,
],
"cameraHorizon": 0.0,
"distance": 1.5787419080734253,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.34|+02.11|-01.27",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.33746451139450073,
"y": 2.1101503372192383,
"z": -1.2722522020339966,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.334695965051651,
1.9445738792419434,
-0.7808091640472412,
-0.20383307337760925,
2.275726795196533,
-0.3908956050872803,
],
"cameraHorizon": 0.0,
"distance": 1.2113124132156372,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.34|+02.11|-00.39",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.33746451139450073,
"y": 2.1101503372192383,
"z": -0.39089563488960266,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.29263991117477417,
1.524427890777588,
-3.242128849029541,
-0.16177701950073242,
2.2490928173065186,
-2.9145426750183105,
],
"cameraHorizon": 0.0,
"distance": 3.1549649238586426,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.33|+01.89|-03.24",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.3272084593772888,
"y": 1.8867603540420532,
"z": -3.24212908744812,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-1.0901057720184326,
0.7320617437362671,
-3.888105630874634,
-0.12189435958862305,
0.952538251876831,
-2.9198944568634033,
],
"cameraHorizon": 0.0,
"distance": 3.1575143337249756,
"isopen": False,
"name": "Sink",
"objectId": "Sink|-00.61|+00.94|-03.40",
"objectType": "Sink",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.6060000061988831,
"y": 0.9419999718666077,
"z": -3.4040000438690186,
},
"receptacle": True,
"receptacleCount": 4,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 44.999996185302734, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.24254396557807922,
0.2711706757545471,
-0.8578107357025146,
-0.18492531776428223,
0.3472771644592285,
-0.8001892566680908,
],
"cameraHorizon": 0.0,
"distance": 1.06029212474823,
"isopen": False,
"name": "Egg",
"objectId": "Egg|-00.21|+00.27|-00.83",
"objectType": "Egg",
"openable": False,
"parentReceptacle": "Fridge|-00.22|00.00|-00.83",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.2137332558631897,
"y": 0.2719060778617859,
"z": -0.8289999961853027,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 270.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-1.5313434600830078,
0.9396243691444397,
-3.5390284061431885,
-1.444072961807251,
1.0310288667678833,
-3.452800989151001,
],
"cameraHorizon": 0.0,
"distance": 3.3288652896881104,
"isopen": False,
"name": "Apple",
"objectId": "Apple|-01.49|+00.93|-03.50",
"objectType": "Apple",
"openable": False,
"parentReceptacle": "",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -1.4870775938034058,
"y": 0.9303702116012573,
"z": -3.495858669281006,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.42987868189811707,
0.7445617914199829,
-0.7644813060760498,
-0.27457037568092346,
0.8978313207626343,
-0.614234447479248,
],
"cameraHorizon": 0.0,
"distance": 0.7373902201652527,
"isopen": False,
"name": "Lettuce1",
"objectId": "Lettuce|-00.33|+00.74|-00.69",
"objectType": "Lettuce",
"openable": False,
"parentReceptacle": "Fridge|-00.22|00.00|-00.83",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.2137332707643509,
"y": 0.7358768582344055,
"z": -0.6933581233024597,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 270.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.6563448309898376,
0.8579825162887573,
-1.8734675645828247,
-0.576196014881134,
0.9381582736968994,
-1.7933334112167358,
],
"cameraHorizon": 0.0,
"distance": 1.590955376625061,
"isopen": False,
"name": "StoveKnob2_Range1",
"objectId": "StoveKnob|-00.62|+00.90|-01.83",
"objectType": "StoveKnob",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.6176999807357788,
"y": 0.8995000123977661,
"z": -1.833400011062622,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 315.0, "y": 89.97400665283203, "z": 180.03199768066406},
"visible": False,
},
{
"bounds3D": [
-0.6007806062698364,
0.9309259057044983,
-1.624263048171997,
-0.4915965795516968,
1.0337982177734375,
-1.5355703830718994,
],
"cameraHorizon": 0.0,
"distance": 1.3485466241836548,
"isopen": False,
"name": "CoffeeCup1",
"objectId": "Mug|-00.53|+00.93|-01.58",
"objectType": "Mug",
"openable": False,
"parentReceptacle": "",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.5322529077529907,
"y": 0.9301429986953735,
"z": -1.5799167156219482,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.3178284764289856,
0.9333651065826416,
-2.3485283851623535,
-0.1359715461730957,
0.9572690725326538,
-2.1666717529296875,
],
"cameraHorizon": 0.0,
"distance": 2.0752294063568115,
"isopen": False,
"name": "GasStoveTop_Range3",
"objectId": "StoveBurner|-00.23|+00.93|-02.26",
"objectType": "StoveBurner",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.22689999639987946,
"y": 0.9301429986953735,
"z": -2.2576000690460205,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.5608127117156982,
0.9253336787223816,
-2.6081254482269287,
-0.2908085584640503,
0.9346393942832947,
-2.578345537185669,
],
"cameraHorizon": 0.0,
"distance": 2.369608163833618,
"isopen": False,
"name": "butterKnife",
"objectId": "ButterKnife|-00.43|+00.93|-02.60",
"objectType": "ButterKnife",
"openable": False,
"parentReceptacle": "",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.4278929829597473,
"y": 0.9303703904151917,
"z": -2.5970890522003174,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-1.4711631536483765,
0.9296106696128845,
-3.788638114929199,
-1.1927717924118042,
1.0843539237976074,
-3.621340751647949,
],
"cameraHorizon": 0.0,
"distance": 3.504027843475342,
"isopen": False,
"name": "Bread",
"objectId": "Bread|-01.33|+00.93|-03.71",
"objectType": "Bread",
"openable": False,
"parentReceptacle": "",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -1.3320000171661377,
"y": 0.9303702712059021,
"z": -3.7049999237060547,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 6.309757232666016, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.6563448309898376,
0.8581824898719788,
-2.1692676544189453,
-0.576196014881134,
0.9383582472801208,
-2.0891332626342773,
],
"cameraHorizon": 0.0,
"distance": 1.8855619430541992,
"isopen": False,
"name": "StoveKnob2_Range3",
"objectId": "StoveKnob|-00.62|+00.90|-02.13",
"objectType": "StoveKnob",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.6176999807357788,
"y": 0.8996999859809875,
"z": -2.129199981689453,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 315.0, "y": 89.97400665283203, "z": 180.03199768066406},
"visible": False,
},
{
"bounds3D": [
-1.6801782846450806,
0.9300780892372131,
-3.5211691856384277,
-1.5957564115524292,
1.001486897468567,
-3.4346466064453125,
],
"cameraHorizon": 0.0,
"distance": 3.3443284034729004,
"isopen": False,
"name": "Potato",
"objectId": "Potato|-01.63|+00.93|-03.48",
"objectType": "Potato",
"openable": False,
"parentReceptacle": "",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -1.6319999694824219,
"y": 0.9303702116012573,
"z": -3.475545883178711,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.3178284764289856,
0.9333651065826416,
-1.9365284442901611,
-0.1359715461730957,
0.9572690725326538,
-1.754671573638916,
],
"cameraHorizon": 0.0,
"distance": 1.6798983812332153,
"isopen": False,
"name": "GasStoveTop_Range2",
"objectId": "StoveBurner|-00.23|+00.93|-01.85",
"objectType": "StoveBurner",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.22689999639987946,
"y": 0.9301429986953735,
"z": -1.8456000089645386,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-2.784135103225708,
0.9281330108642578,
-3.721567153930664,
-2.5158650875091553,
1.3016245365142822,
-3.4185357093811035,
],
"cameraHorizon": 0.0,
"distance": 3.8290257453918457,
"isopen": False,
"name": "CoffeeMachine2",
"objectId": "CoffeeMachine|-02.65|+00.93|-03.57",
"objectType": "CoffeeMachine",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -2.6500000953674316,
"y": 0.9303701519966125,
"z": -3.5739998817443848,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.6211026906967163,
0.7797816395759583,
-1.4715903997421265,
-0.41446253657341003,
0.7992590069770813,
-1.4300788640975952,
],
"cameraHorizon": 0.0,
"distance": 1.2420284748077393,
"isopen": False,
"name": "Spoon",
"objectId": "Spoon|-00.50|+00.78|-01.45",
"objectType": "Spoon",
"openable": False,
"parentReceptacle": "Cabinet|-00.50|+00.78|-01.45",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.4998437762260437,
"y": 0.784561276435852,
"z": -1.450774908065796,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": -1.0245284101983998e-05, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.5118284225463867,
0.9333651065826416,
-2.3485283851623535,
-0.3299715518951416,
0.9572690725326538,
-2.1666717529296875,
],
"cameraHorizon": 0.0,
"distance": 2.035006284713745,
"isopen": False,
"name": "GasStoveTop_Range4",
"objectId": "StoveBurner|-00.42|+00.93|-02.26",
"objectType": "StoveBurner",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.42089998722076416,
"y": 0.9301429986953735,
"z": -2.2576000690460205,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.5738816261291504,
0.0948454737663269,
-2.837768316268921,
-0.37388163805007935,
0.2948455214500427,
-2.637768030166626,
],
"cameraHorizon": 0.0,
"distance": 2.6583845615386963,
"isopen": False,
"name": "Pot1",
"objectId": "Pot|-00.47|+00.08|-02.74",
"objectType": "Pot",
"openable": False,
"parentReceptacle": "Cabinet|-00.63|+00.39|-02.51",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.4738820791244507,
"y": 0.08484548330307007,
"z": -2.737863779067993,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": -1.0245284101983998e-05, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-2.613636016845703,
0.0006269514560699463,
-3.853076219558716,
-2.085458755493164,
0.874946117401123,
-3.286182165145874,
],
"cameraHorizon": 0.0,
"distance": 3.8430612087249756,
"isopen": False,
"name": "Chair5",
"objectId": "Chair|-02.35|00.00|-03.60",
"objectType": "Chair",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -2.3540000915527344,
"y": -5.653919288306497e-07,
"z": -3.6019999980926514,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 74.2330551147461, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.3505246043205261,
1.5073667764663696,
-2.2319486141204834,
0.009090721607208252,
1.8599165678024292,
-1.720513105392456,
],
"cameraHorizon": 0.0,
"distance": 1.961709976196289,
"isopen": False,
"name": "Microwave4",
"objectId": "Microwave|-00.17|+01.49|-02.06",
"objectType": "Microwave",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.1746000051498413,
"y": 1.485553503036499,
"z": -2.055999994277954,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
],
"sceneName": "FloorPlan28",
"screenHeight": 300,
"screenWidth": 300,
}
metadata_simple = {
"agent": {
"bounds3D": [],
"cameraHorizon": 0.0,
"distance": 0.0,
"isopen": False,
"name": "agent",
"objectId": "",
"objectType": "",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {"x": -0.75, "y": 1.0, "z": -0.25},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
"agentId": 0,
"thirdPartyCameras": [],
"collided": False,
"collidedObjects": [],
"colorBounds": [],
"colors": [],
"errorCode": "",
"errorMessage": "",
"inventoryObjects": [],
"lastAction": "",
"lastActionSuccess": False,
"objects": [
{
"bounds3D": [
-2.5750010013580322,
0.8563164472579956,
-3.647000312805176,
-1.5749990940093994,
0.9563164710998535,
-3.3069992065429688,
],
"cameraHorizon": 0.0,
"distance": 3.6243574619293213,
"isopen": False,
"name": "Tabletop",
"objectId": "TableTop|-02.08|+00.94|-03.62",
"objectType": "TableTop",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -2.075000047683716,
"y": 0.9433164596557617,
"z": -3.622999906539917,
},
"receptacle": True,
"receptacleCount": 4,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 90.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.2521742284297943,
1.4949759244918823,
-2.831829071044922,
-0.05024271458387375,
1.5067294836044312,
-2.6298975944519043,
],
"cameraHorizon": 0.0,
"distance": 2.5996196269989014,
"isopen": False,
"name": "Plate",
"objectId": "Plate|-00.15|+01.49|-02.73",
"objectType": "Plate",
"openable": False,
"parentReceptacle": "Cabinet|-00.33|+01.89|-02.51",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.15120847523212433,
"y": 1.494760513305664,
"z": -2.730863332748413,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": -1.0245284101983998e-05, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.6563448309898376,
0.8580825328826904,
-2.015467643737793,
-0.576196014881134,
0.9382582902908325,
-1.9353333711624146,
],
"cameraHorizon": 0.0,
"distance": 1.7333749532699585,
"isopen": False,
"name": "StoveKnob2_Range2",
"objectId": "StoveKnob|-00.62|+00.90|-01.98",
"objectType": "StoveKnob",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.6176999807357788,
"y": 0.8996000289916992,
"z": -1.9753999710083008,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 315.0, "y": 89.97400665283203, "z": 180.03199768066406},
"visible": False,
},
{
"bounds3D": [
-1.3614451885223389,
0.9283196926116943,
-3.5663928985595703,
-1.2814817428588867,
0.9905622005462646,
-3.486574649810791,
],
"cameraHorizon": 0.0,
"distance": 3.32662034034729,
"isopen": False,
"name": "Tomato",
"objectId": "Tomato|-01.32|+00.93|-03.53",
"objectType": "Tomato",
"openable": False,
"parentReceptacle": "",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -1.3221999406814575,
"y": 0.9303702116012573,
"z": -3.5262999534606934,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.7945087552070618,
0.07984550297260284,
-3.400216579437256,
-0.5677620768547058,
0.12984557449817657,
-3.1494078636169434,
],
"cameraHorizon": 0.0,
"distance": 3.1609947681427,
"isopen": False,
"name": "Pan1",
"objectId": "Pan|-00.68|+00.08|-03.27",
"objectType": "Pan",
"openable": False,
"parentReceptacle": "Cabinet|-00.63|+00.39|-03.01",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.6810178160667419,
"y": 0.08484554290771484,
"z": -3.274834156036377,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {
"x": -6.1288878896448296e-06,
"y": 280.44842529296875,
"z": 1.398907170369057e-05,
},
"visible": False,
},
{
"bounds3D": [
-0.21095620095729828,
0.9303669929504395,
-2.992823362350464,
-0.09956331551074982,
1.1846275329589844,
-2.8814303874969482,
],
"cameraHorizon": 0.0,
"distance": 2.753037691116333,
"isopen": False,
"name": "Container",
"objectId": "Container|-00.16|+00.93|-02.94",
"objectType": "Container",
"openable": False,
"parentReceptacle": "",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.15525996685028076,
"y": 0.9303703308105469,
"z": -2.937127113342285,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.40836191177368164,
0.14085793495178223,
-1.15748929977417,
0.030406057834625244,
1.7145073413848877,
-0.5005106925964355,
],
"cameraHorizon": 0.0,
"distance": 1.270815134048462,
"isopen": False,
"name": "Fridge1",
"objectId": "Fridge|-00.22|00.00|-00.83",
"objectType": "Fridge",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [
{"objectId": "Egg|-00.21|+00.27|-00.83", "pivotId": 0},
{"objectId": "Lettuce|-00.33|+00.74|-00.69", "pivotId": 1},
],
"position": {
"x": -0.22300000488758087,
"y": -0.0010000000474974513,
"z": -0.8289999961853027,
},
"receptacle": True,
"receptacleCount": 6,
"receptacleObjectIds": [
"Egg|-00.21|+00.27|-00.83",
"Lettuce|-00.33|+00.74|-00.69",
],
"rotation": {"x": 0.0, "y": 270.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.6255507469177246,
0.8067288994789124,
-2.7551281452178955,
-0.38278937339782715,
0.826447069644928,
-2.7230093479156494,
],
"cameraHorizon": 0.0,
"distance": 2.5104362964630127,
"isopen": False,
"name": "Fork1",
"objectId": "Fork|-00.48|+00.81|-02.74",
"objectType": "Fork",
"openable": False,
"parentReceptacle": "Cabinet|-00.48|+00.78|-02.74",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.48289254307746887,
"y": 0.8116353750228882,
"z": -2.7390687465667725,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": -1.0245284101983998e-05, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.553860604763031,
0.2711416482925415,
-0.4028606414794922,
-0.16013938188552856,
0.6648629307746887,
-0.00913935899734497,
],
"cameraHorizon": 0.0,
"distance": 1.0753535032272339,
"isopen": False,
"name": "GarbageCan",
"objectId": "GarbageCan|-00.36|00.00|-00.21",
"objectType": "GarbageCan",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.3569999933242798,
"y": -3.196139175543067e-08,
"z": -0.20600000023841858,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.8528260588645935,
0.9309259057044983,
-3.9095852375030518,
-0.714918315410614,
1.0337982177734375,
-3.7689216136932373,
],
"cameraHorizon": 0.0,
"distance": 3.600764513015747,
"isopen": False,
"name": "CoffeeCup1",
"objectId": "Mug|-00.78|+00.93|-03.85",
"objectType": "Mug",
"openable": False,
"parentReceptacle": "",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.7749999761581421,
"y": 0.9301429986953735,
"z": -3.8499999046325684,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 50.4573860168457, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.19851021468639374,
0.9635931253433228,
-2.7536282539367676,
-0.09219704568386078,
1.3012911081314087,
-2.7334327697753906,
],
"cameraHorizon": 0.0,
"distance": 2.5742080211639404,
"isopen": False,
"name": "Knife1",
"objectId": "Knife|-00.14|+01.12|-02.75",
"objectType": "Knife",
"openable": False,
"parentReceptacle": "",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.14190000295639038,
"y": 1.117300033569336,
"z": -2.7486000061035156,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 10.637146949768066, "y": 274.3685607910156, "z": 270.0},
"visible": False,
},
{
"bounds3D": [
-0.5118284225463867,
0.9333651065826416,
-1.9365284442901611,
-0.3299715518951416,
0.9572690725326538,
-1.754671573638916,
],
"cameraHorizon": 0.0,
"distance": 1.6306827068328857,
"isopen": False,
"name": "GasStoveTop_Range1",
"objectId": "StoveBurner|-00.42|+00.93|-01.85",
"objectType": "StoveBurner",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.42089998722076416,
"y": 0.9301429986953735,
"z": -1.8456000089645386,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.2595430612564087,
1.4952101707458496,
-1.5506460666656494,
-0.06338601559400558,
1.5541222095489502,
-1.3544890880584717,
],
"cameraHorizon": 0.0,
"distance": 1.4276409149169922,
"isopen": False,
"name": "Bowl",
"objectId": "Bowl|-00.16|+01.50|-01.45",
"objectType": "Bowl",
"openable": False,
"parentReceptacle": "Cabinet|-00.34|+01.89|-01.29",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.16146452724933624,
"y": 1.495596170425415,
"z": -1.45256769657135,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": -1.0245284101983998e-05, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.6566448211669922,
0.8584824800491333,
-2.3290677070617676,
-0.5764960050582886,
0.9386582374572754,
-2.2489333152770996,
],
"cameraHorizon": 0.0,
"distance": 2.0457139015197754,
"isopen": False,
"name": "StoveKnob2_Range4",
"objectId": "StoveKnob|-00.62|+00.90|-02.29",
"objectType": "StoveKnob",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.6179999709129333,
"y": 0.8999999761581421,
"z": -2.2890000343322754,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 315.0, "y": 89.97400665283203, "z": 180.03199768066406},
"visible": False,
},
{
"bounds3D": [
-0.2558910846710205,
0.9301429390907288,
-1.6137478351593018,
-0.0713789314031601,
1.1241569519042969,
-1.2920067310333252,
],
"cameraHorizon": 0.0,
"distance": 1.3400065898895264,
"isopen": False,
"name": "Toaster1",
"objectId": "Toaster|-00.16|+00.93|-01.45",
"objectType": "Toaster",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.1636350154876709,
"y": 0.9301429986953735,
"z": -1.4528772830963135,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-1.665656328201294,
0.924782931804657,
-3.7827463150024414,
-1.5564723014831543,
1.0276552438735962,
-3.6940536499023438,
],
"cameraHorizon": 0.0,
"distance": 3.5972678661346436,
"isopen": False,
"name": "CoffeeCup1",
"objectId": "Mug|-01.63|+00.92|-03.74",
"objectType": "Mug",
"openable": False,
"parentReceptacle": "",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -1.625,
"y": 0.9240000247955322,
"z": -3.7383999824523926,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 180.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.29263991117477417,
1.5244276523590088,
-2.8414499759674072,
-0.16177701950073242,
2.2490928173065186,
-2.5138638019561768,
],
"cameraHorizon": 0.0,
"distance": 2.4678280353546143,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.33|+01.89|-02.51",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [{"objectId": "Plate|-00.15|+01.49|-02.73", "pivotId": 0}],
"position": {
"x": -0.3272084593772888,
"y": 1.8867602348327637,
"z": -2.5138635635375977,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": ["Plate|-00.15|+01.49|-02.73"],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.6222020983695984,
0.7248871326446533,
-1.614982008934021,
-0.6195090413093567,
0.8706167936325073,
-1.2865678071975708,
],
"cameraHorizon": 0.0,
"distance": 1.2460066080093384,
"isopen": False,
"name": "Drawer",
"objectId": "Cabinet|-00.50|+00.78|-01.45",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [{"objectId": "Spoon|-00.50|+00.78|-01.45", "pivotId": 0}],
"position": {
"x": -0.5008437633514404,
"y": 0.7795612812042236,
"z": -1.450774908065796,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": ["Spoon|-00.50|+00.78|-01.45"],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.5953136682510376,
0.09301626682281494,
-1.6149822473526,
-0.4644508361816406,
0.6846745014190674,
-1.3194092512130737,
],
"cameraHorizon": 0.0,
"distance": 1.5003715753555298,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.63|+00.39|-01.61",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.6298819780349731,
"y": 0.3888453245162964,
"z": -1.6149822473526,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-1.2881675958633423,
0.7248872518539429,
-3.3793442249298096,
-1.0107892751693726,
0.8706167936325073,
-3.376683473587036,
],
"cameraHorizon": 0.0,
"distance": 3.2797152996063232,
"isopen": False,
"name": "Drawer",
"objectId": "Cabinet|-01.15|+00.78|-03.50",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -1.1494783163070679,
"y": 0.7825552225112915,
"z": -3.4980251789093018,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-3.5819432735443115,
0.09301620721817017,
-3.3748939037323,
-0.9107897281646729,
0.6846743822097778,
-3.362663507461548,
],
"cameraHorizon": 0.0,
"distance": 3.188777446746826,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-01.01|+00.39|-03.37",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -1.010789155960083,
"y": 0.3888453245162964,
"z": -3.368778705596924,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.8397345542907715,
0.09301596879959106,
-3.5855960845947266,
-0.3782111406326294,
0.6846745014190674,
-3.124072551727295,
],
"cameraHorizon": 0.0,
"distance": 2.8281376361846924,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.63|+00.39|-03.01",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [{"objectId": "Pan|-00.68|+00.08|-03.27", "pivotId": 0}],
"position": {
"x": -0.6330178380012512,
"y": 0.3888453245162964,
"z": -3.0088343620300293,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": ["Pan|-00.68|+00.08|-03.27"],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.5953132510185242,
0.09301614761352539,
-2.9192330837249756,
-0.4644504189491272,
0.6846743822097778,
-2.5138638019561768,
],
"cameraHorizon": 0.0,
"distance": 2.3479816913604736,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.63|+00.39|-02.51",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [{"objectId": "Pot|-00.47|+00.08|-02.74", "pivotId": 0}],
"position": {
"x": -0.6298820972442627,
"y": 0.3888453245162964,
"z": -2.5138638019561768,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": ["Pot|-00.47|+00.08|-02.74"],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.6035346984863281,
0.7248871326446533,
-2.9642739295959473,
-0.6004599332809448,
0.8706167936325073,
-2.5138635635375977,
],
"cameraHorizon": 0.0,
"distance": 2.513312578201294,
"isopen": False,
"name": "Drawer",
"objectId": "Cabinet|-00.48|+00.78|-02.74",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [{"objectId": "Fork|-00.48|+00.81|-02.74", "pivotId": 0}],
"position": {
"x": -0.4819878041744232,
"y": 0.777635395526886,
"z": -2.7390687465667725,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": ["Fork|-00.48|+00.81|-02.74"],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.6152604818344116,
1.5292630195617676,
-3.8681092262268066,
-0.15373694896697998,
2.2539286613464355,
-3.406585216522217,
],
"cameraHorizon": 0.0,
"distance": 3.196824073791504,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.35|+01.89|-03.29",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.34654390811920166,
"y": 1.8915960788726807,
"z": -3.2933475971221924,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.3028959631919861,
1.5292634963989258,
-1.5821408033370972,
-0.17203307151794434,
2.2539284229278564,
-1.2865678071975708,
],
"cameraHorizon": 0.0,
"distance": 1.428146243095398,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.34|+01.89|-01.29",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [{"objectId": "Bowl|-00.16|+01.50|-01.45", "pivotId": 0}],
"position": {
"x": -0.33746451139450073,
"y": 1.8915960788726807,
"z": -1.2865678071975708,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": ["Bowl|-00.16|+01.50|-01.45"],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.33359596133232117,
1.9445738792419434,
-2.497605323791504,
-0.20273306965827942,
2.275726795196533,
-2.12178373336792,
],
"cameraHorizon": 0.0,
"distance": 2.540541172027588,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.34|+02.11|-02.50",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.33746451139450073,
"y": 2.1101503372192383,
"z": -2.497605323791504,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.33359596133232117,
1.9445738792419434,
-2.0148353576660156,
-0.20273306965827942,
2.275726795196533,
-1.631803035736084,
],
"cameraHorizon": 0.0,
"distance": 1.8198896646499634,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.34|+02.11|-01.63",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.33746451139450073,
"y": 2.1101503372192383,
"z": -1.6318029165267944,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.334695965051651,
1.9445741176605225,
-1.2722522020339966,
-0.20383307337760925,
2.275726556777954,
-0.909758448600769,
],
"cameraHorizon": 0.0,
"distance": 1.5644868612289429,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.34|+02.11|-01.27",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.33746451139450073,
"y": 2.1101503372192383,
"z": -1.2722522020339966,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.334695965051651,
1.9445738792419434,
-0.7808091640472412,
-0.20383307337760925,
2.275726795196533,
-0.3908956050872803,
],
"cameraHorizon": 0.0,
"distance": 1.1926738023757935,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.34|+02.11|-00.39",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.33746451139450073,
"y": 2.1101503372192383,
"z": -0.39089563488960266,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.29263991117477417,
1.524427890777588,
-3.242128849029541,
-0.16177701950073242,
2.2490928173065186,
-2.9145426750183105,
],
"cameraHorizon": 0.0,
"distance": 3.149275064468384,
"isopen": False,
"name": "Cabinet",
"objectId": "Cabinet|-00.33|+01.89|-03.24",
"objectType": "Cabinet",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.3272084593772888,
"y": 1.8867603540420532,
"z": -3.24212908744812,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 270.019775390625, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-1.0901057720184326,
0.7320617437362671,
-3.888105630874634,
-0.12189435958862305,
0.952538251876831,
-2.9198944568634033,
],
"cameraHorizon": 0.0,
"distance": 3.15781831741333,
"isopen": False,
"name": "Sink",
"objectId": "Sink|-00.61|+00.94|-03.40",
"objectType": "Sink",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.6060000061988831,
"y": 0.9419999718666077,
"z": -3.4040000438690186,
},
"receptacle": True,
"receptacleCount": 4,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 44.999996185302734, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.24254396557807922,
0.2711706757545471,
-0.8578107357025146,
-0.18492531776428223,
0.3472771644592285,
-0.8001892566680908,
],
"cameraHorizon": 0.0,
"distance": 1.0737521648406982,
"isopen": False,
"name": "Egg",
"objectId": "Egg|-00.21|+00.27|-00.83",
"objectType": "Egg",
"openable": False,
"parentReceptacle": "Fridge|-00.22|00.00|-00.83",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.2137332558631897,
"y": 0.2719060778617859,
"z": -0.8289999961853027,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 270.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-1.5313434600830078,
0.9396243691444397,
-3.5390284061431885,
-1.444072961807251,
1.0310288667678833,
-3.452800989151001,
],
"cameraHorizon": 0.0,
"distance": 3.3292236328125,
"isopen": False,
"name": "Apple",
"objectId": "Apple|-01.49|+00.93|-03.50",
"objectType": "Apple",
"openable": False,
"parentReceptacle": "",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -1.4870775938034058,
"y": 0.9303702116012573,
"z": -3.495858669281006,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.42987868189811707,
0.7445617914199829,
-0.7644813060760498,
-0.27457037568092346,
0.8978313207626343,
-0.614234447479248,
],
"cameraHorizon": 0.0,
"distance": 0.7442509531974792,
"isopen": False,
"name": "Lettuce1",
"objectId": "Lettuce|-00.33|+00.74|-00.69",
"objectType": "Lettuce",
"openable": False,
"parentReceptacle": "Fridge|-00.22|00.00|-00.83",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.2137332707643509,
"y": 0.7358768582344055,
"z": -0.6933581233024597,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 270.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.6563448309898376,
0.8579825162887573,
-1.8734675645828247,
-0.576196014881134,
0.9381582736968994,
-1.7933334112167358,
],
"cameraHorizon": 0.0,
"distance": 1.5920926332473755,
"isopen": False,
"name": "StoveKnob2_Range1",
"objectId": "StoveKnob|-00.62|+00.90|-01.83",
"objectType": "StoveKnob",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.6176999807357788,
"y": 0.8995000123977661,
"z": -1.833400011062622,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 315.0, "y": 89.97400665283203, "z": 180.03199768066406},
"visible": False,
},
{
"bounds3D": [
-0.6007806062698364,
0.9309259057044983,
-1.624263048171997,
-0.4915965795516968,
1.0337982177734375,
-1.5355703830718994,
],
"cameraHorizon": 0.0,
"distance": 1.3494340181350708,
"isopen": False,
"name": "CoffeeCup1",
"objectId": "Mug|-00.53|+00.93|-01.58",
"objectType": "Mug",
"openable": False,
"parentReceptacle": "",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.5322529077529907,
"y": 0.9301429986953735,
"z": -1.5799167156219482,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.3178284764289856,
0.9333651065826416,
-2.3485283851623535,
-0.1359715461730957,
0.9572690725326538,
-2.1666717529296875,
],
"cameraHorizon": 0.0,
"distance": 2.0758063793182373,
"isopen": False,
"name": "GasStoveTop_Range3",
"objectId": "StoveBurner|-00.23|+00.93|-02.26",
"objectType": "StoveBurner",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.22689999639987946,
"y": 0.9301429986953735,
"z": -2.2576000690460205,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.5608127117156982,
0.9253336787223816,
-2.6081254482269287,
-0.2908085584640503,
0.9346393942832947,
-2.578345537185669,
],
"cameraHorizon": 0.0,
"distance": 2.3701114654541016,
"isopen": False,
"name": "butterKnife",
"objectId": "ButterKnife|-00.43|+00.93|-02.60",
"objectType": "ButterKnife",
"openable": False,
"parentReceptacle": "",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.4278929829597473,
"y": 0.9303703904151917,
"z": -2.5970890522003174,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-1.4711631536483765,
0.9296106696128845,
-3.788638114929199,
-1.1927717924118042,
1.0843539237976074,
-3.621340751647949,
],
"cameraHorizon": 0.0,
"distance": 3.504368305206299,
"isopen": False,
"name": "Bread",
"objectId": "Bread|-01.33|+00.93|-03.71",
"objectType": "Bread",
"openable": False,
"parentReceptacle": "",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -1.3320000171661377,
"y": 0.9303702712059021,
"z": -3.7049999237060547,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 6.309757232666016, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.6563448309898376,
0.8581824898719788,
-2.1692676544189453,
-0.576196014881134,
0.9383582472801208,
-2.0891332626342773,
],
"cameraHorizon": 0.0,
"distance": 1.8865195512771606,
"isopen": False,
"name": "StoveKnob2_Range3",
"objectId": "StoveKnob|-00.62|+00.90|-02.13",
"objectType": "StoveKnob",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.6176999807357788,
"y": 0.8996999859809875,
"z": -2.129199981689453,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 315.0, "y": 89.97400665283203, "z": 180.03199768066406},
"visible": False,
},
{
"bounds3D": [
-1.6801782846450806,
0.9300780892372131,
-3.5211691856384277,
-1.5957564115524292,
1.001486897468567,
-3.4346466064453125,
],
"cameraHorizon": 0.0,
"distance": 3.3446850776672363,
"isopen": False,
"name": "Potato",
"objectId": "Potato|-01.63|+00.93|-03.48",
"objectType": "Potato",
"openable": False,
"parentReceptacle": "",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -1.6319999694824219,
"y": 0.9303702116012573,
"z": -3.475545883178711,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.3178284764289856,
0.9333651065826416,
-1.9365284442901611,
-0.1359715461730957,
0.9572690725326538,
-1.754671573638916,
],
"cameraHorizon": 0.0,
"distance": 1.6806108951568604,
"isopen": False,
"name": "GasStoveTop_Range2",
"objectId": "StoveBurner|-00.23|+00.93|-01.85",
"objectType": "StoveBurner",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.22689999639987946,
"y": 0.9301429986953735,
"z": -1.8456000089645386,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-2.784135103225708,
0.9281330108642578,
-3.721567153930664,
-2.5158650875091553,
1.3016245365142822,
-3.4185357093811035,
],
"cameraHorizon": 0.0,
"distance": 3.8293373584747314,
"isopen": False,
"name": "CoffeeMachine2",
"objectId": "CoffeeMachine|-02.65|+00.93|-03.57",
"objectType": "CoffeeMachine",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -2.6500000953674316,
"y": 0.9303701519966125,
"z": -3.5739998817443848,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.6211026906967163,
0.7797816395759583,
-1.4715903997421265,
-0.41446253657341003,
0.7992590069770813,
-1.4300788640975952,
],
"cameraHorizon": 0.0,
"distance": 1.2453322410583496,
"isopen": False,
"name": "Spoon",
"objectId": "Spoon|-00.50|+00.78|-01.45",
"objectType": "Spoon",
"openable": False,
"parentReceptacle": "Cabinet|-00.50|+00.78|-01.45",
"pickupable": True,
"pivotSimObjs": [],
"position": {
"x": -0.4998437762260437,
"y": 0.784561276435852,
"z": -1.450774908065796,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": -1.0245284101983998e-05, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.5118284225463867,
0.9333651065826416,
-2.3485283851623535,
-0.3299715518951416,
0.9572690725326538,
-2.1666717529296875,
],
"cameraHorizon": 0.0,
"distance": 2.0355944633483887,
"isopen": False,
"name": "GasStoveTop_Range4",
"objectId": "StoveBurner|-00.42|+00.93|-02.26",
"objectType": "StoveBurner",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.42089998722076416,
"y": 0.9301429986953735,
"z": -2.2576000690460205,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.5738816261291504,
0.0948454737663269,
-2.837768316268921,
-0.37388163805007935,
0.2948455214500427,
-2.637768030166626,
],
"cameraHorizon": 0.0,
"distance": 2.6651856899261475,
"isopen": False,
"name": "Pot1",
"objectId": "Pot|-00.47|+00.08|-02.74",
"objectType": "Pot",
"openable": False,
"parentReceptacle": "Cabinet|-00.63|+00.39|-02.51",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.4738820791244507,
"y": 0.08484548330307007,
"z": -2.737863779067993,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": -1.0245284101983998e-05, "y": 0.0, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-2.613636016845703,
0.0006269514560699463,
-3.853076219558716,
-2.085458755493164,
0.874946117401123,
-3.286182165145874,
],
"cameraHorizon": 0.0,
"distance": 3.848210096359253,
"isopen": False,
"name": "Chair5",
"objectId": "Chair|-02.35|00.00|-03.60",
"objectType": "Chair",
"openable": False,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -2.3540000915527344,
"y": -5.653919288306497e-07,
"z": -3.6019999980926514,
},
"receptacle": False,
"receptacleCount": 0,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 74.2330551147461, "z": 0.0},
"visible": False,
},
{
"bounds3D": [
-0.3505246043205261,
1.5073667764663696,
-2.2319486141204834,
0.009090721607208252,
1.8599165678024292,
-1.720513105392456,
],
"cameraHorizon": 0.0,
"distance": 1.9566510915756226,
"isopen": False,
"name": "Microwave4",
"objectId": "Microwave|-00.17|+01.49|-02.06",
"objectType": "Microwave",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.1746000051498413,
"y": 1.485553503036499,
"z": -2.055999994277954,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
},
],
"sceneName": "FloorPlan28",
"screenHeight": 300,
"screenWidth": 300,
}
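# The fixtures below wrap the metadata dicts defined above in Event objects that the
# tests in this module exercise.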
@pytest.fixture
def event_complex():
return Event(metadata_complex)
@pytest.fixture
def event():
return Event(metadata_simple)
@pytest.fixture
def event_with_frame(event):
    e = event
    with open(os.path.join(TESTS_DATA_DIR, "rgb-image.raw"), "rb") as f:
        raw_image = memoryview(f.read())
    e.add_image(raw_image)
    return e
def _event_with_segmentation(raw_data_path):
with open(os.path.join(TESTS_DATA_DIR, "instance_segmentation_metadata.json")) as f:
metadata = json.loads(f.read())
e = Event(metadata)
with open(os.path.join(TESTS_DATA_DIR, raw_data_path), "rb") as f:
seg_frame_data = f.read()
e.add_image_ids(seg_frame_data)
return e
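# Two events, one per raw instance-segmentation frame encoding shipped with the test
# data; the parametrized segmentation tests below are expected to pass for both.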
segmentation_events = [
_event_with_segmentation("instance_segmentation_frame.raw"),
_event_with_segmentation("instance_segmentation_frame_rgb24.raw"),
]
def test_get_object(event):
microwave = {
"bounds3D": [
-0.3505246043205261,
1.5073667764663696,
-2.2319486141204834,
0.009090721607208252,
1.8599165678024292,
-1.720513105392456,
],
"cameraHorizon": 0.0,
"distance": 1.9566510915756226,
"isopen": False,
"name": "Microwave4",
"objectId": "Microwave|-00.17|+01.49|-02.06",
"objectType": "Microwave",
"openable": True,
"parentReceptacle": "",
"pickupable": False,
"pivotSimObjs": [],
"position": {
"x": -0.1746000051498413,
"y": 1.485553503036499,
"z": -2.055999994277954,
},
"receptacle": True,
"receptacleCount": 1,
"receptacleObjectIds": [],
"rotation": {"x": 0.0, "y": 0.0, "z": 0.0},
"visible": False,
}
assert event.get_object("Microwave|-00.17|+01.49|-02.06") == microwave
assert event.get_object("FOOO") is None
def test_cv2img(event_with_frame):
cvf = np.load(os.path.join(TESTS_DATA_DIR, "test-image1-bgr.npy"))
assert event_with_frame.cv2img.shape == event_with_frame.frame.shape
assert np.all(cvf == event_with_frame.cv2img)
assert not np.all(event_with_frame.frame == event_with_frame.cv2img)
def test_add_image(event):
    with open(os.path.join(TESTS_DATA_DIR, "rgb-image.raw"), "rb") as f:
        raw_image = memoryview(f.read())
    expected = np.load(os.path.join(TESTS_DATA_DIR, "test-image1-rgb.npy"))
    assert event.frame is None
    event.add_image(raw_image)
    assert event.frame.shape == (300, 300, 3)
    assert np.all(expected == event.frame)
def test_metadata(event):
assert event.screen_height == 300
assert event.screen_width == 300
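    # pose is derived from the agent entry in metadata_simple: x/z scaled by 1000,
    # followed by rotation and camera horizon (both zero here).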
assert event.pose == (-750, -250, 0, 0)
def test_metadata_wrapper_reachable_positions_index_error():
    metadata_clone = json.loads(json.dumps(metadata_simple))
    metadata_clone["lastAction"] = "RotateRight"
    e = Event(metadata_clone)
    # reachablePositions is only remapped for GetSceneBounds/GetReachablePositions
    # actions, so any other lastAction should raise an IndexError.
    with pytest.raises(IndexError):
        e.metadata["reachablePositions"]
def test_metadata_wrapper_reachable_positions():
metadata_clone = json.loads(json.dumps(metadata_simple))
metadata_clone["lastAction"] = "GetReachablePositions"
if "reachablePositions" in metadata_clone:
del metadata_clone["reachablePositions"]
metadata_clone["actionReturn"] = "GetReachablePositions RETURN"
e = Event(metadata_clone)
with warnings.catch_warnings(record=True) as w:
assert (
e.metadata["reachablePositions"] == "GetReachablePositions RETURN"
), "GetReachablePositions should return actionReturn"
assert len(w) == 1
assert (
'event.metadata["reachablePositions"] is deprecated and has been remapped to event.metadata["actionReturn"]'
in str(w[-1].message)
)
def test_metadata_wrapper_scene_bounds_reachable_positions():
metadata_clone = json.loads(json.dumps(metadata_simple))
metadata_clone["lastAction"] = "GetSceneBounds"
if "actionReturn" in metadata_clone:
del metadata_clone["actionReturn"]
metadata_clone["reachablePositions"] = "reachablePositions SCENEBOUNDS RETURN"
e = Event(metadata_clone)
with warnings.catch_warnings(record=True) as w:
assert (
e.metadata["reachablePositions"] == "reachablePositions SCENEBOUNDS RETURN"
), "GetSceneBounds reachablePositions should return reachablePositions"
assert len(w) == 0
def test_metadata_wrapper_scene_bounds_action_return():
metadata_clone = json.loads(json.dumps(metadata_simple))
if "reachablePositions" in metadata_clone:
del metadata_clone["reachablePositions"]
metadata_clone["lastAction"] = "GetSceneBounds"
metadata_clone["actionReturn"] = "SCENEBOUNDS RETURN"
e = Event(metadata_clone)
with warnings.catch_warnings(record=True) as w:
assert (
e.metadata["reachablePositions"] == "SCENEBOUNDS RETURN"
), "GetSceneBounds reachablePositions should return actionReturn"
assert len(w) == 1
assert (
'event.metadata["reachablePositions"] is deprecated and has been remapped to event.metadata["actionReturn"]'
in str(w[-1].message)
)
def test_objects_by_type(event):
all_mugs = [o["objectId"] for o in event.objects_by_type("Mug")]
mug_object_ids = [
"Mug|-00.78|+00.93|-03.85",
"Mug|-01.63|+00.92|-03.74",
"Mug|-00.53|+00.93|-01.58",
]
assert all_mugs == mug_object_ids
assert event.objects_by_type("FOO") == []
def test_depth_float32():
    metadata = json.loads(json.dumps(metadata_simple))
    metadata["depthFormat"] = "Meters"
    with open(os.path.join(TESTS_DATA_DIR, "image-depth-datafloat32.raw"), "rb") as f:
        data = f.read()
    event_float32 = Event(metadata)
    event_float32.add_image_depth(
        data,
        depth_format=DepthFormat.Meters,
        camera_near_plane=0.1,
        camera_far_plane=20.0,
        add_noise=False,
    )
    assert np.all(
        event_float32.depth_frame[0:1, 0:8]
        == np.full((1, 8), 0.82262456, dtype=np.float32)
    )
def test_depth_256():
    metadata = json.loads(json.dumps(metadata_simple))
    if "depthFormat" in metadata:
        del metadata["depthFormat"]
    event_256 = Event(metadata)
    with open(os.path.join(TESTS_DATA_DIR, "image-depth-data256.raw"), "rb") as f:
        data = f.read()
    event_256.add_image_depth(
        data,
        depth_format=DepthFormat.Meters,
        camera_near_plane=0.1,
        camera_far_plane=20.0,
        add_noise=False,
    )
    assert np.all(
        event_256.depth_frame[0:1, 0:8]
        == np.full((1, 8), 0.8223207, dtype=np.float32)
    )
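# test_process_colors checks the colour <-> objectId lookup tables that Event builds
# from the "colors" entries in metadata_complex; the expected RGB triples below come
# from that metadata.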
def test_process_colors(event_complex):
event_complex.process_colors
assert len(event_complex.color_to_object_id.keys()) == 125
assert event_complex.color_to_object_id[(207, 119, 70)] == "Spatula3.001"
assert (
event_complex.color_to_object_id[(141, 139, 54)]
== "Cabinet|-00.63|+00.39|-02.51"
)
assert (
event_complex.color_to_object_id[(29, 84, 249)] == "Spoon|-00.50|+00.78|-01.45"
)
assert event_complex.color_to_object_id[(235, 57, 90)] == "Spoon"
assert event_complex.object_id_to_color["Spatula3.001"] == (207, 119, 70)
assert event_complex.object_id_to_color["Cabinet|-00.63|+00.39|-02.51"] == (
141,
139,
54,
)
assert event_complex.object_id_to_color["Spoon|-00.50|+00.78|-01.45"] == (
29,
84,
249,
)
assert event_complex.object_id_to_color["Spoon"] == (235, 57, 90)
@pytest.mark.parametrize("event_with_segmentation", segmentation_events)
def test_lazy_instance_segmentation(event_with_segmentation):
assert (
event_with_segmentation.instance_masks[
"CoffeeMachine|+00.89|+00.90|-02.13"
].sum()
== 14833
)
expected_keys = [
"CoffeeMachine|+00.89|+00.90|-02.13",
"Cabinet|+00.95|+02.16|-02.38",
"StoveBurner|+01.08|+00.92|-01.50",
"Cabinet|+00.95|+02.16|-00.76",
"StandardWallSize|1|0|2",
"Cabinet|+00.95|+02.44|-01.78",
"StoveBurner|+00.84|+00.92|-01.10",
"StoveBurner|+00.84|+00.92|-01.50",
"StoveBurner|+01.08|+00.92|-01.10",
"StandardCounterHeightWidth|0.98|0|0.18",
"StandardUpperCabinetHeightWidth|1.28|0|0.18",
"StandardWallTileHeight1|1.3|0|0.18",
"StoveBase1|0.997|0|-1.302",
"StoveTopGas|-1.503001|0|-1.06545",
"Pan|+00.85|+00.95|-01.08",
"SaltShaker|+01.19|+00.90|-01.80",
"Microwave|+01.04|+01.68|-01.30",
"Cup|+01.08|+00.90|-00.77",
"StoveKnob|+00.67|+00.90|-01.24",
"StoveKnob|+00.67|+00.90|-01.09",
"StoveKnob|+00.67|+00.90|-01.52",
"StoveKnob|+00.67|+00.90|-01.37",
"PepperShaker|+01.09|+00.90|-01.82",
"Spatula|+01.10|+00.91|-00.63",
"PaperTowelRoll|+01.22|+01.01|-00.52",
"CounterTop|+00.93|+00.95|-02.05",
"CounterTop|+00.93|+00.95|-00.21",
]
assert list(event_with_segmentation.instance_masks.keys()) == expected_keys
@pytest.mark.parametrize("event_with_segmentation", segmentation_events)
def test_lazy_instance_contains(event_with_segmentation):
    assert "CoffeeMachine|+00.89|+00.90|-02.13" in event_with_segmentation.instance_masks
    assert "Cabinet|+00.65|+00.48|+00.24" not in event_with_segmentation.instance_masks
    assert "Foo" not in event_with_segmentation.instance_masks
@pytest.mark.parametrize("event_with_segmentation", segmentation_events)
def test_lazy_class_contains(event_with_segmentation):
    assert "Cabinet" in event_with_segmentation.class_masks
    assert "Window" not in event_with_segmentation.class_masks
    assert "Foo" not in event_with_segmentation.class_masks
@pytest.mark.parametrize("event_with_segmentation", segmentation_events)
def test_lazy_instance_detections2d(event_with_segmentation):
assert event_with_segmentation.instance_detections2D[
"CoffeeMachine|+00.89|+00.90|-02.13"
] == (509, 371, 599, 576)
@pytest.mark.parametrize("event_with_segmentation", segmentation_events)
def test_lazy_class_segmentation(event_with_segmentation):
assert event_with_segmentation.class_masks["Cabinet"].sum() == 111227
expected_keys = [
"Cabinet",
"StoveBurner",
"StandardWallSize",
"StandardCounterHeightWidth",
"StandardUpperCabinetHeightWidth",
"StandardWallTileHeight1",
"StoveBase1",
"StoveTopGas",
"Pan",
"SaltShaker",
"Microwave",
"Cup",
"StoveKnob",
"CoffeeMachine",
"PepperShaker",
"Spatula",
"PaperTowelRoll",
"CounterTop",
]
assert list(event_with_segmentation.class_masks.keys()) == expected_keys
@pytest.mark.parametrize("event_with_segmentation", segmentation_events)
def test_lazy_class_segmentation_missing(event_with_segmentation):
with pytest.raises(KeyError):
event_with_segmentation.class_masks["Stove"]
@pytest.mark.parametrize("event_with_segmentation", segmentation_events)
def test_lazy_class_segmentation_background(event_with_segmentation):
# colors that don't appear in the metadata get labeled as "background"
class_colors_copy = json.loads(
json.dumps(event_with_segmentation.instance_masks.class_colors)
)
del event_with_segmentation.instance_masks.class_colors["Cabinet"]
if "background" in event_with_segmentation.class_masks._masks:
del event_with_segmentation.class_masks._masks["background"]
if "Cabinet" in event_with_segmentation.class_masks._masks:
del event_with_segmentation.class_masks._masks["Cabinet"]
assert event_with_segmentation.class_masks["background"].sum() == 111227
with pytest.raises(KeyError):
event_with_segmentation.class_masks["Cabinet"]
event_with_segmentation.instance_masks.class_colors = class_colors_copy
@pytest.mark.parametrize("event_with_segmentation", segmentation_events)
def test_lazy_class_detections2d(event_with_segmentation):
assert event_with_segmentation.class_detections2D["Cabinet"] == (
(473, 0, 599, 284),
(0, 0, 145, 284),
(164, 0, 467, 109),
)
with pytest.raises(KeyError):
event_with_segmentation.class_detections2D["Stove"]
@pytest.mark.parametrize("event_with_segmentation", segmentation_events)
def test_lazy_class_detections2d_missing(event_with_segmentation):
with pytest.raises(KeyError):
event_with_segmentation.class_detections2D["Stove"]
@pytest.mark.parametrize("event_with_segmentation", segmentation_events)
def test_lazy_instance_masks_keys(event_with_segmentation):
keys = set(
{
'StoveTopGas|-1.503001|0|-1.06545',
'Cabinet|+00.95|+02.16|-02.38',
'Cup|+01.08|+00.90|-00.77',
'StoveBurner|+00.84|+00.92|-01.50',
'StandardUpperCabinetHeightWidth|1.28|0|0.18',
'Spatula|+01.10|+00.91|-00.63',
'PaperTowelRoll|+01.22|+01.01|-00.52',
'StoveBase1|0.997|0|-1.302',
'Cabinet|+00.95|+02.16|-00.76',
'StoveKnob|+00.67|+00.90|-01.09',
'StoveBurner|+01.08|+00.92|-01.10',
'StoveKnob|+00.67|+00.90|-01.37',
'StandardCounterHeightWidth|0.98|0|0.18',
'StoveBurner|+00.84|+00.92|-01.10',
'StandardWallSize|1|0|2',
'StoveKnob|+00.67|+00.90|-01.52',
'Microwave|+01.04|+01.68|-01.30',
'CoffeeMachine|+00.89|+00.90|-02.13',
'Cabinet|+00.95|+02.44|-01.78',
'StoveBurner|+01.08|+00.92|-01.50',
'StandardWallTileHeight1|1.3|0|0.18',
'StoveKnob|+00.67|+00.90|-01.24',
'CounterTop|+00.93|+00.95|-00.21',
'Pan|+00.85|+00.95|-01.08',
'SaltShaker|+01.19|+00.90|-01.80',
'PepperShaker|+01.09|+00.90|-01.82',
'CounterTop|+00.93|+00.95|-02.05',
}
)
assert set(event_with_segmentation.instance_masks.keys()) == keys
@pytest.mark.parametrize("event_with_segmentation", segmentation_events)
def test_lazy_instance_detections2d_keys(event_with_segmentation):
keys = set(
{
'StoveTopGas|-1.503001|0|-1.06545',
'Cabinet|+00.95|+02.16|-02.38',
'Cup|+01.08|+00.90|-00.77',
'StoveBurner|+00.84|+00.92|-01.50',
'StandardUpperCabinetHeightWidth|1.28|0|0.18',
'Spatula|+01.10|+00.91|-00.63',
'PaperTowelRoll|+01.22|+01.01|-00.52',
'StoveBase1|0.997|0|-1.302',
'Cabinet|+00.95|+02.16|-00.76',
'StoveKnob|+00.67|+00.90|-01.09',
'StoveBurner|+01.08|+00.92|-01.10',
'StoveKnob|+00.67|+00.90|-01.37',
'StandardCounterHeightWidth|0.98|0|0.18',
'StoveBurner|+00.84|+00.92|-01.10',
'StandardWallSize|1|0|2',
'StoveKnob|+00.67|+00.90|-01.52',
'Microwave|+01.04|+01.68|-01.30',
'CoffeeMachine|+00.89|+00.90|-02.13',
'Cabinet|+00.95|+02.44|-01.78',
'StoveBurner|+01.08|+00.92|-01.50',
'StandardWallTileHeight1|1.3|0|0.18',
'StoveKnob|+00.67|+00.90|-01.24',
'CounterTop|+00.93|+00.95|-00.21',
'Pan|+00.85|+00.95|-01.08',
'SaltShaker|+01.19|+00.90|-01.80',
'PepperShaker|+01.09|+00.90|-01.82',
'CounterTop|+00.93|+00.95|-02.05',
}
)
assert set(event_with_segmentation.instance_detections2D.keys()) == keys
@pytest.mark.parametrize("event_with_segmentation", segmentation_events)
def test_lazy_class_detections2d_keys(event_with_segmentation):
keys = set(
{
'Cabinet',
'CoffeeMachine',
'CounterTop',
'Cup',
'Microwave',
'Pan',
'PaperTowelRoll',
'PepperShaker',
'SaltShaker',
'Spatula',
'StandardCounterHeightWidth',
'StandardUpperCabinetHeightWidth',
'StandardWallSize',
'StandardWallTileHeight1',
'StoveBase1',
'StoveBurner',
'StoveKnob',
'StoveTopGas',
}
)
assert set(event_with_segmentation.class_detections2D.keys()) == keys
| ai2thor-main | ai2thor/tests/test_event.py |
import ai2thor.wsgi_server
import pytest
import numpy as np
import json
from ai2thor.wsgi_server import Queue
from ai2thor.tests.test_event import metadata_simple
from io import BytesIO
import copy
def generate_multi_agent_form(metadata, sequence_id=1):
agent2 = copy.deepcopy(metadata)
agent2["agentId"] = 1
agent1 = metadata
agents = [agent1, agent2]
boundary = b"--OVCo05I3SVXLPeTvCgJjHl1EOleL4u9TDx5raRVt"
data = (
b"\r\n"
+ boundary
+ b'\r\nContent-Type: text/plain; charset="utf-8"\r\nContent-disposition: form-data; name="metadata"\r\n\r\n'
)
data += json.dumps(
dict(agents=agents, sequenceId=sequence_id, activeAgentId=1)
).encode("utf8")
data += (
b"\r\n"
+ boundary
+ b'\r\nContent-Type: text/plain; charset="utf-8"\r\nContent-disposition: form-data; name="actionReturns"\r\n\r\n'
)
data += (
b"\r\n"
+ boundary
+ b'\r\nContent-Type: text/plain; charset="utf-8"\r\nContent-disposition: form-data; name="token"\r\n\r\n'
)
data += b"12cb40b5-3a70-4316-8ae2-82cbff6c9902"
data += b"\r\n" + boundary + b"--\r\n"
return data
def generate_form(metadata, sequence_id=1):
boundary = b"--OVCo05I3SVXLPeTvCgJjHl1EOleL4u9TDx5raRVt"
data = (
b"\r\n"
+ boundary
+ b'\r\nContent-Type: text/plain; charset="utf-8"\r\nContent-disposition: form-data; name="metadata"\r\n\r\n'
)
data += json.dumps(dict(agents=[metadata], sequenceId=sequence_id)).encode("utf8")
data += (
b"\r\n"
+ boundary
+ b'\r\nContent-Type: text/plain; charset="utf-8"\r\nContent-disposition: form-data; name="actionReturns"\r\n\r\n'
)
data += json.dumps([None]).encode("utf8")
data += (
b"\r\n"
+ boundary
+ b'\r\nContent-Type: text/plain; charset="utf-8"\r\nContent-disposition: form-data; name="token"\r\n\r\n'
)
data += b"12cb40b5-3a70-4316-8ae2-82cbff6c9902"
data += b"\r\n" + boundary + b"--\r\n"
return data
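# Both helpers above hand-assemble a multipart/form-data body in the layout the
# tests below post to the /train endpoint: a "metadata" part carrying the
# JSON-encoded agent metadata plus sequenceId, an "actionReturns" part, and a
# "token" part holding the client token, each delimited by the boundary string
# and terminated by the closing "--" boundary.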
@pytest.fixture
def server():
return ai2thor.wsgi_server.WsgiServer(host="127.0.0.1")
@pytest.fixture
def client(server):
return server.app.test_client()
def test_ping(client):
res = client.get("/ping")
assert res.data == b"pong"
def test_multi_agent_train():
s = ai2thor.wsgi_server.WsgiServer(host="127.0.0.1")
s.send(dict(action="RotateRight"))
c = s.app.test_client()
res = c.post(
"/train",
buffered=True,
content_type="multipart/form-data; boundary=OVCo05I3SVXLPeTvCgJjHl1EOleL4u9TDx5raRVt",
input_stream=BytesIO(generate_multi_agent_form(metadata_simple, s.sequence_id)),
)
assert res.status_code == 200
def test_train_numpy_action():
s = ai2thor.wsgi_server.WsgiServer(host="127.0.0.1")
s.send(
dict(
action="Teleport",
rotation=dict(y=np.array([24])[0]),
moveMagnitude=np.array([55.5])[0],
myCustomArray=np.array([1, 2]),
)
)
c = s.app.test_client()
res = c.post(
"/train",
buffered=True,
content_type="multipart/form-data; boundary=OVCo05I3SVXLPeTvCgJjHl1EOleL4u9TDx5raRVt",
input_stream=BytesIO(generate_form(metadata_simple, s.sequence_id)),
)
j = json.loads(res.get_data())
assert j == {
"action": "Teleport",
"rotation": {"y": 24},
"sequenceId": 1,
"moveMagnitude": 55.5,
"myCustomArray": [1, 2],
}
assert res.status_code == 200
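# The test above relies on the server coercing numpy scalars and arrays in the
# action dict into plain Python numbers/lists before serialization, since
# json.dumps cannot encode numpy arrays or numpy integer scalars directly.
# A minimal sketch of that kind of coercion (illustrative only, not the
# server's actual implementation):
def _example_numpy_json_coercion():
    def coerce(value):
        if isinstance(value, np.generic):
            return value.item()
        if isinstance(value, np.ndarray):
            return value.tolist()
        return value

    action = dict(moveMagnitude=np.array([55.5])[0], myCustomArray=np.array([1, 2]))
    return json.dumps({k: coerce(v) for k, v in action.items()})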
def test_train():
s = ai2thor.wsgi_server.WsgiServer(host="127.0.0.1")
s.send(dict(action="RotateRight"))
c = s.app.test_client()
res = c.post(
"/train",
buffered=True,
content_type="multipart/form-data; boundary=OVCo05I3SVXLPeTvCgJjHl1EOleL4u9TDx5raRVt",
input_stream=BytesIO(generate_form(metadata_simple, s.sequence_id)),
)
assert res.status_code == 200
def test_client_token_mismatch():
s = ai2thor.wsgi_server.WsgiServer(host="127.0.0.1")
s.send(dict(action="RotateRight"))
s.client_token = "123456"
c = s.app.test_client()
res = c.post(
"/train",
buffered=True,
content_type="multipart/form-data; boundary=OVCo05I3SVXLPeTvCgJjHl1EOleL4u9TDx5raRVt",
input_stream=BytesIO(generate_form(metadata_simple, s.sequence_id + 1)),
)
assert res.status_code == 403
def test_non_multipart():
s = ai2thor.wsgi_server.WsgiServer(host="127.0.0.1")
s.send(dict(action="RotateRight"))
c = s.app.test_client()
s.client_token = "1234567"
m = dict(agents=[metadata_simple], sequenceId=s.sequence_id)
res = c.post(
"/train",
data=dict(
metadata=json.dumps(m),
token=s.client_token,
actionReturns=json.dumps([None]),
),
)
assert res.status_code == 200
def test_sequence_id_mismatch():
s = ai2thor.wsgi_server.WsgiServer(host="127.0.0.1")
s.send(dict(action="RotateRight"))
c = s.app.test_client()
res = c.post(
"/train",
buffered=True,
content_type="multipart/form-data; boundary=OVCo05I3SVXLPeTvCgJjHl1EOleL4u9TDx5raRVt",
input_stream=BytesIO(generate_form(metadata_simple, s.sequence_id + 1)),
)
assert res.status_code == 500
| ai2thor-main | ai2thor/tests/test_server.py |
import copy
import ctypes
import functools
import glob
import json
import os
import random
import re
import shutil
import string
import time
import warnings

import cv2
import jsonschema
import numpy as np
import pytest
from PIL import ImageChops, ImageFilter, Image

from ai2thor.controller import Controller
from ai2thor.build import TEST_OUTPUT_DIRECTORY
from ai2thor.tests.constants import TESTS_DATA_DIR, TEST_SCENE
from ai2thor.wsgi_server import WsgiServer
from ai2thor.fifo_server import FifoServer
# Defining const classes to lessen the possibility of a misspelled key
class Actions:
AddThirdPartyCamera = "AddThirdPartyCamera"
UpdateThirdPartyCamera = "UpdateThirdPartyCamera"
class MultiAgentMetadata:
thirdPartyCameras = "thirdPartyCameras"
class ThirdPartyCameraMetadata:
position = "position"
rotation = "rotation"
fieldOfView = "fieldOfView"
class TestController(Controller):
def unity_command(self, width, height, headless):
command = super().unity_command(width, height, headless)
# force OpenGLCore to get used so that the tests run in a consistent way
# With low power graphics cards (such as those in the test environment)
# Metal behaves in inconsistent ways causing test failures
command.append("-force-glcore")
return command
def build_controller(**args):
default_args = dict(scene=TEST_SCENE, local_build=True)
default_args.update(args)
# during a ci-build we will get a warning that we are using a commit_id for the
# build instead of 'local'
with warnings.catch_warnings():
warnings.simplefilter("ignore")
print("args test controller")
print(default_args)
c = TestController(**default_args)
# used for resetting
c._original_initialization_parameters = c.initialization_parameters
return c
_wsgi_controller = build_controller(server_class=WsgiServer)
_fifo_controller = build_controller(server_class=FifoServer)
def skip_reset(controller):
# setting attribute on the last event so we can tell if the
# controller gets used since last event will change after each step
controller.last_event._pytest_skip_reset = True
# resetting on each use so that each tests works with
# the scene in a pristine state
def reset_controller(controller):
controller.initialization_parameters = copy.deepcopy(
controller._original_initialization_parameters
)
if not hasattr(controller.last_event, "_pytest_skip_reset"):
controller.reset(TEST_SCENE, height=300, width=300)
skip_reset(controller)
return controller
def save_image(file_path, image, flip_br=False):
img = image
if flip_br:
img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
cv2.imwrite(file_path, img)
def depth_to_gray_rgb(data):
return (255.0 / data.max() * (data - data.min())).astype(np.uint8)
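# depth_to_gray_rgb roughly rescales a float depth frame into a 0-255 uint8
# image (relative to the frame's own min/max) so failing depth comparisons can
# be written out with save_image for inspection.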
@pytest.fixture
def wsgi_controller():
return reset_controller(_wsgi_controller)
@pytest.fixture
def fifo_controller():
return reset_controller(_fifo_controller)
fifo_wsgi = [_fifo_controller, _wsgi_controller]
fifo = [_fifo_controller]
BASE_FP28_POSITION = dict(x=-1.5, z=-1.5, y=0.901,)
BASE_FP28_LOCATION = dict(
**BASE_FP28_POSITION, rotation={"x": 0, "y": 0, "z": 0}, horizon=0, standing=True,
)
def teleport_to_base_location(controller: Controller):
assert controller.last_event.metadata["sceneName"] == TEST_SCENE
controller.step("TeleportFull", **BASE_FP28_LOCATION)
assert controller.last_event.metadata["lastActionSuccess"]
def setup_function(function):
for c in fifo_wsgi:
reset_controller(c)
def teardown_module(module):
for c in fifo_wsgi:
c.stop()
def assert_near(point1, point2, error_message=""):
    assert point1.keys() == point2.keys(), error_message + " Keys mismatch."
    for k in point1.keys():
        assert abs(point1[k] - point2[k]) < 1e-3, (
            error_message + f" for {k} key, {point1[k]} != {point2[k]}"
        )
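# A small self-contained usage sketch for assert_near: both points must have
# the same keys and every coordinate must agree within 1e-3 (never collected
# by pytest, illustrative only):
def _example_assert_near_usage():
    assert_near(dict(x=-1.5, z=-1.5, y=0.901), dict(x=-1.5, z=-1.5, y=0.9011))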
def images_near(image1, image2, max_mean_pixel_diff=1, debug_save=False, filepath=""):
print("Mean pixel difference: {}, Max pixel difference: {}.".format(
np.mean(np.abs(image1 - image2).flatten()),
np.max(np.abs(image1 - image2).flatten()))
)
result = np.mean(np.abs(image1 - image2).flatten()) <= max_mean_pixel_diff
if not result and debug_save:
# TODO put images somewhere accessible
dx = np.where(~np.all(image1 == image2, axis=-1))
img_copy = image1.copy()
diff = (image1 - image2)
max = np.max(diff)
norm_diff = diff / max
img_copy[dx] = (255, 0, 255)
# for i in range(np.shape(dx)[1]):
# value = img_copy[dx[0][i], dx[0][i]]
# img_copy[dx[0][i] : dx[0][i]] = (255.0, 255.0, 255.0)
# img_copy[dx] += ((255.0, 255.0, 255.0) * norm_diff[dx])+ img_copy[dx]
test_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]
debug_directory = os.path.join(os.path.join(os.getcwd(), TEST_OUTPUT_DIRECTORY))
# if os.path.exists(debug_directory):
# shutil.rmtree(debug_directory)
# os.makedirs(debug_directory)
save_image(os.path.join(debug_directory, f'{test_name}_diff.png'), img_copy)
save_image(os.path.join(debug_directory, f'{test_name}_fail.png'), image1)
print(f'Saved failed test images in "{debug_directory}"')
return result
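# images_near treats two frames as matching when the mean absolute per-element
# pixel difference is at most max_mean_pixel_diff. A tiny self-contained check
# with made-up arrays (not real render output, never collected by pytest):
def _example_images_near_usage():
    a = np.zeros((2, 2, 3), dtype=np.int32)
    b = a.copy()
    b[0, 0, 0] = 3  # one channel differs by 3 -> mean abs diff = 3 / 12 = 0.25
    assert images_near(a, b, max_mean_pixel_diff=1)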
def depth_images_near(depth1, depth2, epsilon=1e-5, debug_save=False, filepath=""):
# result = np.allclose(depth1, depth2, atol=epsilon)
result = np.mean(np.abs(depth1 - depth2).flatten()) <= epsilon
print("Max pixel difference: {}, Mean pixel difference: {}".format(
np.max((depth1 - depth2).flatten()),
np.mean((depth1 - depth2).flatten()))
)
if not result and debug_save:
depth1_gray = depth_to_gray_rgb(depth1)
depth_copy = cv2.cvtColor(depth1_gray, cv2.COLOR_GRAY2RGB)
diff = np.abs(depth1 - depth2)
max = np.max(diff)
norm_diff = diff / max
dx = np.where(np.abs(depth1 - depth2) >= epsilon)
depth_copy[dx] = (norm_diff[dx]*255, norm_diff[dx] * 0, norm_diff[dx] *255)
test_name = os.environ.get('PYTEST_CURRENT_TEST').split(':')[-1].split(' ')[0]
debug_directory = os.path.join(os.path.join(os.getcwd(), TEST_OUTPUT_DIRECTORY))
# if os.path.exists(debug_directory):
# shutil.rmtree(debug_directory)
# os.makedirs(debug_directory)
save_image(os.path.join(debug_directory, f'{test_name}_diff.png'), depth_copy)
save_image(os.path.join(debug_directory, f'{test_name}_fail.png'), depth1_gray)
np.save(
os.path.join(debug_directory, f'{test_name}_fail-raw.npy'),
depth1.astype(np.float32),
        )
print(f'Saved failed test images in "{debug_directory}"')
return result
def images_far(image1, image2, min_mean_pixel_diff=10):
return np.mean(np.abs(image1 - image2).flatten()) >= min_mean_pixel_diff
def test_agent_controller_type_no_longer_accepted(fifo_controller):
with pytest.raises(ValueError):
build_controller(
server_class=FifoServer,
agentControllerType="physics",
agentMode="default",
)
# TODO: We should make ServerAction type actions fail when passed
# invalid arguments.
# with pytest.raises(Exception):
# fifo_controller.reset(agentControllerType="physics", agentMode="default")
# Issue #514 found that the thirdPartyCamera image code was causing multi-agents to end
# up with the same frame
def test_multi_agent_with_third_party_camera(fifo_controller):
fifo_controller.reset(TEST_SCENE, agentCount=2)
assert not np.all(
fifo_controller.last_event.events[1].frame
== fifo_controller.last_event.events[0].frame
)
event = fifo_controller.step(
dict(
action="AddThirdPartyCamera",
rotation=dict(x=0, y=0, z=90),
position=dict(x=-1.0, z=-2.0, y=1.0),
)
)
assert not np.all(
fifo_controller.last_event.events[1].frame
== fifo_controller.last_event.events[0].frame
)
# Issue #526 thirdPartyCamera hanging without correct keys in FifoServer FormMap
def test_third_party_camera_with_image_synthesis(fifo_controller):
fifo_controller.reset(
TEST_SCENE,
renderInstanceSegmentation=True,
renderDepthImage=True,
renderSemanticSegmentation=True,
)
event = fifo_controller.step(
dict(
action="AddThirdPartyCamera",
rotation=dict(x=0, y=0, z=90),
position=dict(x=-1.0, z=-2.0, y=1.0),
)
)
print(len(event.third_party_depth_frames))
assert len(event.third_party_depth_frames) == 1
assert len(event.third_party_semantic_segmentation_frames) == 1
assert len(event.third_party_camera_frames) == 1
assert len(event.third_party_instance_segmentation_frames) == 1
def test_rectangle_aspect(fifo_controller):
fifo_controller.reset(TEST_SCENE, width=600, height=300)
event = fifo_controller.step(dict(action="Initialize", gridSize=0.25))
assert event.frame.shape == (300, 600, 3)
def test_small_aspect(fifo_controller):
fifo_controller.reset(TEST_SCENE, width=128, height=64)
event = fifo_controller.step(dict(action="Initialize", gridSize=0.25))
assert event.frame.shape == (64, 128, 3)
def test_bot_deprecation(fifo_controller):
fifo_controller.reset(TEST_SCENE, agentMode="bot")
assert (
fifo_controller.initialization_parameters["agentMode"].lower() == "locobot"
), "bot should alias to locobot!"
def test_deprecated_segmentation_params(fifo_controller):
# renderObjectImage has been renamed to renderInstanceSegmentation
# renderClassImage has been renamed to renderSemanticSegmentation
fifo_controller.reset(
TEST_SCENE, renderObjectImage=True, renderClassImage=True,
)
event = fifo_controller.last_event
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DeprecationWarning)
assert event.class_segmentation_frame is event.semantic_segmentation_frame
assert event.semantic_segmentation_frame is not None
assert (
event.instance_segmentation_frame is not None
), "renderObjectImage should still render instance_segmentation_frame"
def test_deprecated_segmentation_params2(fifo_controller):
# renderObjectImage has been renamed to renderInstanceSegmentation
# renderClassImage has been renamed to renderSemanticSegmentation
fifo_controller.reset(
TEST_SCENE, renderSemanticSegmentation=True, renderInstanceSegmentation=True,
)
event = fifo_controller.last_event
with warnings.catch_warnings():
warnings.simplefilter("ignore", category=DeprecationWarning)
assert event.class_segmentation_frame is event.semantic_segmentation_frame
assert event.semantic_segmentation_frame is not None
assert (
event.instance_segmentation_frame is not None
), "renderObjectImage should still render instance_segmentation_frame"
def test_reset(fifo_controller):
width = 520
height = 310
event = fifo_controller.reset(
scene=TEST_SCENE, width=width, height=height, renderDepthImage=True
)
assert event.frame.shape == (height, width, 3), "RGB frame dimensions are wrong!"
assert event.depth_frame is not None, "depth frame should have rendered!"
assert event.depth_frame.shape == (
height,
width,
), "depth frame dimensions are wrong!"
width = 300
height = 300
event = fifo_controller.reset(
scene=TEST_SCENE, width=width, height=height, renderDepthImage=False
)
assert event.depth_frame is None, "depth frame shouldn't have rendered!"
assert event.frame.shape == (height, width, 3), "RGB frame dimensions are wrong!"
def test_fast_emit(fifo_controller):
event = fifo_controller.step(dict(action="RotateRight"))
event_fast_emit = fifo_controller.step(dict(action="TestFastEmit", rvalue="foo"))
event_no_fast_emit = fifo_controller.step(dict(action="LookUp"))
event_no_fast_emit_2 = fifo_controller.step(dict(action="RotateRight"))
assert event.metadata["actionReturn"] is None
assert event_fast_emit.metadata["actionReturn"] == "foo"
assert id(event.metadata["objects"]) == id(event_fast_emit.metadata["objects"])
assert id(event.metadata["objects"]) != id(event_no_fast_emit.metadata["objects"])
assert id(event_no_fast_emit_2.metadata["objects"]) != id(
event_no_fast_emit.metadata["objects"]
)
def test_fifo_large_input(fifo_controller):
random_string = "".join(
random.choice(string.ascii_letters) for i in range(1024 * 16)
)
event = fifo_controller.step(
dict(action="TestActionReflectParam", rvalue=random_string)
)
assert event.metadata["actionReturn"] == random_string
def test_fast_emit_disabled(fifo_controller):
slow_controller = fifo_controller
slow_controller.reset(TEST_SCENE, fastActionEmit=False)
event = slow_controller.step(dict(action="RotateRight"))
event_fast_emit = slow_controller.step(dict(action="TestFastEmit", rvalue="foo"))
# assert that when actionFastEmit is off that the objects are different
assert id(event.metadata["objects"]) != id(event_fast_emit.metadata["objects"])
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_lookdown(controller):
e = controller.step(dict(action="RotateLook", rotation=0, horizon=0))
position = controller.last_event.metadata["agent"]["position"]
horizon = controller.last_event.metadata["agent"]["cameraHorizon"]
assert horizon == 0.0
e = controller.step(dict(action="LookDown"))
assert e.metadata["agent"]["position"] == position
assert round(e.metadata["agent"]["cameraHorizon"]) == 30
assert e.metadata["agent"]["rotation"] == dict(x=0, y=0, z=0)
e = controller.step(dict(action="LookDown"))
assert round(e.metadata["agent"]["cameraHorizon"]) == 60
e = controller.step(dict(action="LookDown"))
assert round(e.metadata["agent"]["cameraHorizon"]) == 60
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_no_leak_params(controller):
action = dict(action="RotateLook", rotation=0, horizon=0)
e = controller.step(action)
assert "sequenceId" not in action
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_target_invocation_exception(controller):
# TargetInvocationException is raised when short circuiting failures occur
# on the Unity side. It often occurs when invalid arguments are used.
event = controller.step("OpenObject", x=1.5, y=0.5)
assert not event.metadata["lastActionSuccess"], "OpenObject(x > 1) should fail."
assert event.metadata[
"errorMessage"
], "errorMessage should not be empty when OpenObject(x > 1)."
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_lookup(controller):
e = controller.step(dict(action="RotateLook", rotation=0, horizon=0))
position = controller.last_event.metadata["agent"]["position"]
horizon = controller.last_event.metadata["agent"]["cameraHorizon"]
assert horizon == 0.0
e = controller.step(dict(action="LookUp"))
assert e.metadata["agent"]["position"] == position
assert e.metadata["agent"]["cameraHorizon"] == -30.0
assert e.metadata["agent"]["rotation"] == dict(x=0, y=0, z=0)
e = controller.step(dict(action="LookUp"))
assert e.metadata["agent"]["cameraHorizon"] == -30.0
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_rotate_left(controller):
e = controller.step(dict(action="RotateLook", rotation=0, horizon=0))
position = controller.last_event.metadata["agent"]["position"]
rotation = controller.last_event.metadata["agent"]["rotation"]
assert rotation == dict(x=0, y=0, z=0)
horizon = controller.last_event.metadata["agent"]["cameraHorizon"]
e = controller.step(dict(action="RotateLeft"))
assert e.metadata["agent"]["position"] == position
assert e.metadata["agent"]["cameraHorizon"] == horizon
assert e.metadata["agent"]["rotation"]["y"] == 270.0
assert e.metadata["agent"]["rotation"]["x"] == 0.0
assert e.metadata["agent"]["rotation"]["z"] == 0.0
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_simobj_filter(controller):
objects = controller.last_event.metadata["objects"]
unfiltered_object_ids = sorted([o["objectId"] for o in objects])
filter_object_ids = sorted([o["objectId"] for o in objects[0:3]])
e = controller.step(dict(action="SetObjectFilter", objectIds=filter_object_ids))
assert len(e.metadata["objects"]) == len(filter_object_ids)
filtered_object_ids = sorted([o["objectId"] for o in e.metadata["objects"]])
assert filtered_object_ids == filter_object_ids
e = controller.step(dict(action="SetObjectFilter", objectIds=[]))
assert len(e.metadata["objects"]) == 0
e = controller.step(dict(action="ResetObjectFilter"))
reset_filtered_object_ids = sorted([o["objectId"] for o in e.metadata["objects"]])
assert unfiltered_object_ids == reset_filtered_object_ids
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_add_third_party_camera(controller):
expectedPosition = dict(x=1.2, y=2.3, z=3.4)
expectedRotation = dict(x=30, y=40, z=50)
expectedFieldOfView = 45.0
assert (
len(controller.last_event.metadata[MultiAgentMetadata.thirdPartyCameras]) == 0
), "there should be 0 cameras"
e = controller.step(
dict(
action=Actions.AddThirdPartyCamera,
position=expectedPosition,
rotation=expectedRotation,
fieldOfView=expectedFieldOfView,
)
)
assert (
len(e.metadata[MultiAgentMetadata.thirdPartyCameras]) == 1
), "there should be 1 camera"
camera = e.metadata[MultiAgentMetadata.thirdPartyCameras][0]
assert_near(
camera[ThirdPartyCameraMetadata.position],
expectedPosition,
"initial position should have been set",
)
assert_near(
camera[ThirdPartyCameraMetadata.rotation],
expectedRotation,
"initial rotation should have been set",
)
assert (
camera[ThirdPartyCameraMetadata.fieldOfView] == expectedFieldOfView
), "initial fieldOfView should have been set"
# expects position to be a Vector3, should fail!
event = controller.step(
action="AddThirdPartyCamera", position=5, rotation=dict(x=0, y=0, z=0)
)
assert not event.metadata[
"lastActionSuccess"
], "position should not allow float input!"
# orthographicSize expects float, not Vector3!
error_message = None
try:
event = controller.step(
action="AddThirdPartyCamera",
position=dict(x=0, y=0, z=0),
rotation=dict(x=0, y=0, z=0),
orthographic=True,
orthographicSize=dict(x=0, y=0, z=0),
)
except ValueError as e:
error_message = str(e)
assert error_message.startswith(
"action: AddThirdPartyCamera has an invalid argument: orthographicSize"
)
def test_third_party_camera_depth(fifo_controller):
fifo_controller.reset(TEST_SCENE, width=300, height=300, renderDepthImage=True)
agent_position = {"x": -2.75, "y": 0.9009982347488403, "z": -1.75}
agent_rotation = {"x": 0.0, "y": 90.0, "z": 0.0}
agent_init_position = {"x": -2.75, "y": 0.9009982347488403, "z": -1.25}
camera_position = {"x": -2.75, "y": 1.5759992599487305, "z": -1.75}
camera_rotation = {"x": 0.0, "y": 90.0, "z": 0.0}
# teleport agent into a position the third-party camera won't see
fifo_controller.step(
action="Teleport",
position=agent_init_position,
rotation=agent_rotation,
horizon=0.0,
standing=True,
)
camera_event = fifo_controller.step(
dict(
action=Actions.AddThirdPartyCamera,
position=camera_position,
rotation=camera_rotation,
)
)
camera_depth = camera_event.third_party_depth_frames[0]
agent_event = fifo_controller.step(
action="Teleport",
position=agent_position,
rotation=agent_rotation,
horizon=0.0,
standing=True,
)
agent_depth = agent_event.depth_frame
mse = np.square((np.subtract(camera_depth, agent_depth))).mean()
# if the clipping planes aren't the same between the agent and third-party camera
# the mse will be > 1.0
assert mse < 0.0001
def test_update_third_party_camera(fifo_controller):
# add a new camera
expectedPosition = dict(x=1.2, y=2.3, z=3.4)
expectedRotation = dict(x=30, y=40, z=50)
expectedFieldOfView = 45.0
e = fifo_controller.step(
dict(
action=Actions.AddThirdPartyCamera,
position=expectedPosition,
rotation=expectedRotation,
fieldOfView=expectedFieldOfView,
)
)
assert (
len(fifo_controller.last_event.metadata[MultiAgentMetadata.thirdPartyCameras])
== 1
), "there should be 1 camera"
# update camera pose fully
expectedPosition = dict(x=2.2, y=3.3, z=4.4)
expectedRotation = dict(x=10, y=20, z=30)
expectedInitialFieldOfView = 45.0
e = fifo_controller.step(
dict(
action=Actions.UpdateThirdPartyCamera,
thirdPartyCameraId=0,
position=expectedPosition,
rotation=expectedRotation,
)
)
camera = e.metadata[MultiAgentMetadata.thirdPartyCameras][0]
assert_near(
camera[ThirdPartyCameraMetadata.position],
expectedPosition,
"position should have been updated",
)
assert_near(
camera[ThirdPartyCameraMetadata.rotation],
expectedRotation,
"rotation should have been updated",
)
assert (
camera[ThirdPartyCameraMetadata.fieldOfView] == expectedInitialFieldOfView
), "fieldOfView should not have changed"
# partially update the camera pose
changeFOV = 55.0
expectedPosition2 = dict(x=3.2, z=5)
expectedRotation2 = dict(y=90)
e = fifo_controller.step(
action=Actions.UpdateThirdPartyCamera,
thirdPartyCameraId=0,
fieldOfView=changeFOV,
position=expectedPosition2,
rotation=expectedRotation2,
)
camera = e.metadata[MultiAgentMetadata.thirdPartyCameras][0]
assert (
camera[ThirdPartyCameraMetadata.fieldOfView] == changeFOV
), "fieldOfView should have been updated"
expectedPosition.update(expectedPosition2)
expectedRotation.update(expectedRotation2)
assert_near(
camera[ThirdPartyCameraMetadata.position],
expectedPosition,
"position should been slightly updated",
)
assert_near(
camera[ThirdPartyCameraMetadata.rotation],
expectedRotation,
"rotation should been slightly updated",
)
for fov in [-1, 181, 0]:
e = fifo_controller.step(
dict(
action=Actions.UpdateThirdPartyCamera,
thirdPartyCameraId=0,
fieldOfView=fov,
)
)
assert not e.metadata[
"lastActionSuccess"
], "fieldOfView should fail outside of (0, 180)"
assert_near(
camera[ThirdPartyCameraMetadata.position],
expectedPosition,
"position should not have updated",
)
assert_near(
camera[ThirdPartyCameraMetadata.rotation],
expectedRotation,
"rotation should not have updated",
)
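# Note on the partial-update behavior exercised above: UpdateThirdPartyCamera
# only overwrites the pose components that are supplied, which is why the
# expected position/rotation dicts are merged via dict.update() before the
# final assert_near checks.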
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_rotate_look(controller):
e = controller.step(dict(action="RotateLook", rotation=0, horizon=0))
position = controller.last_event.metadata["agent"]["position"]
rotation = controller.last_event.metadata["agent"]["rotation"]
assert rotation == dict(x=0, y=0, z=0)
e = controller.step(dict(action="RotateLook", rotation=90, horizon=31))
assert e.metadata["agent"]["position"] == position
assert int(e.metadata["agent"]["cameraHorizon"]) == 31
assert e.metadata["agent"]["rotation"]["y"] == 90.0
assert e.metadata["agent"]["rotation"]["x"] == 0.0
assert e.metadata["agent"]["rotation"]["z"] == 0.0
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_rotate_right(controller):
e = controller.step(dict(action="RotateLook", rotation=0, horizon=0))
position = controller.last_event.metadata["agent"]["position"]
rotation = controller.last_event.metadata["agent"]["rotation"]
assert rotation == dict(x=0, y=0, z=0)
horizon = controller.last_event.metadata["agent"]["cameraHorizon"]
e = controller.step(dict(action="RotateRight"))
assert e.metadata["agent"]["position"] == position
assert e.metadata["agent"]["cameraHorizon"] == horizon
assert e.metadata["agent"]["rotation"]["y"] == 90.0
assert e.metadata["agent"]["rotation"]["x"] == 0.0
assert e.metadata["agent"]["rotation"]["z"] == 0.0
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_open_aabb_cache(controller):
objects = controller.last_event.metadata["objects"]
obj = next(obj for obj in objects if obj["objectType"] == "Fridge")
start_aabb = obj["axisAlignedBoundingBox"]
open_event = controller.step(
action="OpenObject",
objectId=obj["objectId"],
forceAction=True,
raise_for_failure=True,
)
obj = next(
obj for obj in open_event.metadata["objects"] if obj["objectType"] == "Fridge"
)
open_aabb = obj["axisAlignedBoundingBox"]
assert start_aabb["size"] != open_aabb["size"]
close_event = controller.step(
action="CloseObject",
objectId=obj["objectId"],
forceAction=True,
raise_for_failure=True,
)
obj = next(
obj for obj in close_event.metadata["objects"] if obj["objectType"] == "Fridge"
)
close_aabb = obj["axisAlignedBoundingBox"]
assert start_aabb["size"] == close_aabb["size"]
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_toggle_stove(controller):
position = {"x": -1.0, "y": 0.9009982347488403, "z": -2.25}
action = position.copy()
action["rotation"] = dict(y=90)
action["horizon"] = 30.0
action["standing"] = True
action["action"] = "TeleportFull"
event = controller.step(action, raise_for_failure=True)
knob = next(
obj
for obj in controller.last_event.metadata["objects"]
if obj["objectType"] == "StoveKnob" and obj["visible"]
)
assert not knob["isToggled"], "knob should not be toggled"
assert knob["visible"]
event = controller.step(
dict(action="ToggleObjectOn", objectId=knob["objectId"]), raise_for_failure=True
)
knob = event.get_object(knob["objectId"])
assert knob["isToggled"], "knob should be toggled"
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_open_interactable_with_filter(controller):
position = {"x": -1.0, "y": 0.9009982347488403, "z": -0.5}
action = position.copy()
action["rotation"] = dict(y=90)
action["horizon"] = 0
action["standing"] = True
action["action"] = "TeleportFull"
controller.step(action, raise_for_failure=True)
fridge = next(
obj
for obj in controller.last_event.metadata["objects"]
if obj["objectType"] == "Fridge"
)
assert fridge["visible"], "Object is not interactable!"
assert_near(controller.last_event.metadata["agent"]["position"], position)
controller.step(dict(action="SetObjectFilter", objectIds=[]))
assert controller.last_event.metadata["objects"] == []
controller.step(
action="OpenObject", objectId=fridge["objectId"], raise_for_failure=True,
)
controller.step(dict(action="ResetObjectFilter"))
fridge = next(
obj
for obj in controller.last_event.metadata["objects"]
if obj["objectType"] == "Fridge"
)
assert fridge["isOpen"]
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_open_interactable(controller):
position = {"x": -1.0, "y": 0.9009982347488403, "z": -0.5}
action = position.copy()
action["rotation"] = dict(y=90)
action["horizon"] = 0
action["standing"] = True
action["action"] = "TeleportFull"
controller.step(action, raise_for_failure=True)
fridge = next(
obj
for obj in controller.last_event.metadata["objects"]
if obj["objectType"] == "Fridge"
)
assert fridge["visible"], "Object is not interactable!"
assert_near(controller.last_event.metadata["agent"]["position"], position)
event = controller.step(
action="OpenObject", objectId=fridge["objectId"], raise_for_failure=True,
)
fridge = next(
obj
for obj in controller.last_event.metadata["objects"]
if obj["objectType"] == "Fridge"
)
assert fridge["isOpen"]
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_open(controller):
objects = controller.last_event.metadata["objects"]
obj_to_open = next(obj for obj in objects if obj["objectType"] == "Fridge")
# helper that returns obj_to_open from a new event
def get_object(event, object_id):
return next(
obj for obj in event.metadata["objects"] if obj["objectId"] == object_id
)
for openness in [0.5, 0.7, 0]:
event = controller.step(
action="OpenObject",
objectId=obj_to_open["objectId"],
openness=openness,
forceAction=True,
raise_for_failure=True,
)
opened_obj = get_object(event, obj_to_open["objectId"])
assert abs(opened_obj["openness"] - openness) < 1e-3, "Incorrect openness!"
assert opened_obj["isOpen"] == (openness != 0), "isOpen incorrectly reported!"
# test bad openness values
for bad_openness in [-0.5, 1.5]:
event = controller.step(
action="OpenObject",
objectId=obj_to_open["objectId"],
openness=bad_openness,
forceAction=True,
)
        assert not event.metadata[
            "lastActionSuccess"
        ], "Openness outside of [0, 1] should fail!"
# test backwards compatibility on moveMagnitude, where moveMagnitude
# is now `openness`, but when moveMagnitude = 0 that corresponds to openness = 1.
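    # (Based on the assertions below: moveMagnitude == 0 maps to openness == 1,
    # and any other moveMagnitude value is treated directly as the openness.)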
event = controller.step(
action="OpenObject",
objectId=obj_to_open["objectId"],
forceAction=True,
moveMagnitude=0,
)
opened_obj = get_object(event, obj_to_open["objectId"])
assert (
abs(opened_obj["openness"] - 1) < 1e-3
), "moveMagnitude=0 must have openness=1"
assert opened_obj["isOpen"], "moveMagnitude isOpen incorrectly reported!"
# another moveMagnitude check
test_openness = 0.65
event = controller.step(
action="OpenObject",
objectId=obj_to_open["objectId"],
forceAction=True,
moveMagnitude=test_openness,
)
opened_obj = get_object(event, obj_to_open["objectId"])
assert (
abs(opened_obj["openness"] - test_openness) < 1e-3
), "moveMagnitude is not working!"
assert opened_obj["isOpen"], "moveMagnitude isOpen incorrectly reported!"
# a CloseObject specific check
event = controller.step(
action="CloseObject", objectId=obj_to_open["objectId"], forceAction=True
)
obj = get_object(event, obj_to_open["objectId"])
assert abs(obj["openness"] - 0) < 1e-3, "CloseObject openness should be 0"
assert not obj["isOpen"], "CloseObject should report isOpen==false!"
def test_action_dispatch(fifo_controller):
controller = fifo_controller
event = controller.step(
dict(action="TestActionDispatchFindAmbiguous"),
typeName="UnityStandardAssets.Characters.FirstPerson.PhysicsRemoteFPSAgentController",
)
known_ambig = sorted(
[
"TestActionDispatchSAAmbig",
"TestActionDispatchSAAmbig2",
"ProcessControlCommand",
]
)
assert sorted(event.metadata["actionReturn"]) == known_ambig
skip_reset(fifo_controller)
def test_action_dispatch_find_ambiguous_stochastic(fifo_controller):
event = fifo_controller.step(
dict(action="TestActionDispatchFindAmbiguous"),
typeName="UnityStandardAssets.Characters.FirstPerson.LocobotFPSAgentController",
)
known_ambig = sorted(
[
"TestActionDispatchSAAmbig",
"TestActionDispatchSAAmbig2",
"ProcessControlCommand",
]
)
assert sorted(event.metadata["actionReturn"]) == known_ambig
skip_reset(fifo_controller)
def test_action_dispatch_server_action_ambiguous2(fifo_controller):
exception_thrown = False
exception_message = None
try:
fifo_controller.step("TestActionDispatchSAAmbig2")
except ValueError as e:
exception_thrown = True
exception_message = str(e)
assert exception_thrown
assert (
"Ambiguous action: TestActionDispatchSAAmbig2 Signature match found in the same class"
== exception_message
)
skip_reset(fifo_controller)
def test_action_dispatch_server_action_ambiguous(fifo_controller):
exception_thrown = False
exception_message = None
try:
fifo_controller.step("TestActionDispatchSAAmbig")
except ValueError as e:
exception_thrown = True
exception_message = str(e)
assert exception_thrown
assert (
exception_message
== "Ambiguous action: TestActionDispatchSAAmbig Mixing a ServerAction method with overloaded methods is not permitted"
)
skip_reset(fifo_controller)
def test_action_dispatch_find_conflicts_stochastic(fifo_controller):
event = fifo_controller.step(
dict(action="TestActionDispatchFindConflicts"),
typeName="UnityStandardAssets.Characters.FirstPerson.LocobotFPSAgentController",
)
known_conflicts = {
"TestActionDispatchConflict": ["param22"],
}
assert event.metadata["actionReturn"] == known_conflicts
skip_reset(fifo_controller)
def test_action_dispatch_find_conflicts_physics(fifo_controller):
event = fifo_controller.step(
dict(action="TestActionDispatchFindConflicts"),
typeName="UnityStandardAssets.Characters.FirstPerson.PhysicsRemoteFPSAgentController",
)
known_conflicts = {
"TestActionDispatchConflict": ["param22"],
}
assert event.metadata["actionReturn"] == known_conflicts
skip_reset(fifo_controller)
def test_action_dispatch_missing_args(fifo_controller):
caught_exception = False
try:
event = fifo_controller.step(
dict(action="TestActionDispatchNoop", param6="foo")
)
except ValueError as e:
caught_exception = True
assert caught_exception
assert fifo_controller.last_event.metadata["errorCode"] == "MissingArguments"
skip_reset(fifo_controller)
def test_action_dispatch_invalid_action(fifo_controller):
caught_exception = False
try:
event = fifo_controller.step(dict(action="TestActionDispatchNoopFoo"))
except ValueError as e:
caught_exception = True
assert caught_exception
assert fifo_controller.last_event.metadata["errorCode"] == "InvalidAction"
skip_reset(fifo_controller)
def test_action_dispatch_empty(fifo_controller):
event = fifo_controller.step(dict(action="TestActionDispatchNoop"))
assert event.metadata["actionReturn"] == "emptyargs"
skip_reset(fifo_controller)
def test_action_dispatch_one_param(fifo_controller):
event = fifo_controller.step(dict(action="TestActionDispatchNoop", param1=True))
assert event.metadata["actionReturn"] == "param1"
skip_reset(fifo_controller)
def test_action_dispatch_two_param(fifo_controller):
event = fifo_controller.step(
dict(action="TestActionDispatchNoop", param1=True, param2=False)
)
assert event.metadata["actionReturn"] == "param1 param2"
skip_reset(fifo_controller)
def test_action_dispatch_two_param_with_default(fifo_controller):
event = fifo_controller.step(
dict(action="TestActionDispatchNoop2", param3=True, param4="foobar")
)
assert event.metadata["actionReturn"] == "param3 param4/default foobar"
skip_reset(fifo_controller)
def test_action_dispatch_two_param_with_default_empty(fifo_controller):
event = fifo_controller.step(dict(action="TestActionDispatchNoop2", param3=True))
assert event.metadata["actionReturn"] == "param3 param4/default foo"
skip_reset(fifo_controller)
def test_action_dispatch_serveraction_default(fifo_controller):
event = fifo_controller.step(dict(action="TestActionDispatchNoopServerAction"))
assert event.metadata["actionReturn"] == "serveraction"
skip_reset(fifo_controller)
def test_action_dispatch_serveraction_with_object_id(fifo_controller):
event = fifo_controller.step(
dict(action="TestActionDispatchNoopServerAction", objectId="candle|1|2|3")
)
assert event.metadata["actionReturn"] == "serveraction"
skip_reset(fifo_controller)
def test_action_dispatch_all_default(fifo_controller):
event = fifo_controller.step(dict(action="TestActionDispatchNoopAllDefault"))
assert event.metadata["actionReturn"] == "alldefault"
skip_reset(fifo_controller)
def test_action_dispatch_some_default(fifo_controller):
event = fifo_controller.step(
dict(action="TestActionDispatchNoopAllDefault2", param12=9.0)
)
assert event.metadata["actionReturn"] == "somedefault"
skip_reset(fifo_controller)
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_moveahead(controller):
teleport_to_base_location(controller)
controller.step(dict(action="MoveAhead"), raise_for_failure=True)
position = controller.last_event.metadata["agent"]["position"]
assert_near(position, dict(x=-1.5, z=-1.25, y=0.901))
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_moveback(controller):
teleport_to_base_location(controller)
controller.step(dict(action="MoveBack"), raise_for_failure=True)
position = controller.last_event.metadata["agent"]["position"]
assert_near(position, dict(x=-1.5, z=-1.75, y=0.900998652))
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_moveleft(controller):
teleport_to_base_location(controller)
controller.step(dict(action="MoveLeft"), raise_for_failure=True)
position = controller.last_event.metadata["agent"]["position"]
assert_near(position, dict(x=-1.75, z=-1.5, y=0.901))
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_moveright(controller):
teleport_to_base_location(controller)
controller.step(dict(action="MoveRight"), raise_for_failure=True)
position = controller.last_event.metadata["agent"]["position"]
assert_near(position, dict(x=-1.25, z=-1.5, y=0.901))
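# The four movement tests above each assert a 0.25m displacement from the base
# location; the default step size appears to be 0.25m unless moveMagnitude is
# passed explicitly, as in test_moveahead_mag below.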
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_moveahead_mag(controller):
teleport_to_base_location(controller)
controller.step(dict(action="MoveAhead", moveMagnitude=0.5), raise_for_failure=True)
position = controller.last_event.metadata["agent"]["position"]
assert_near(position, dict(x=-1.5, z=-1, y=0.9009983))
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_moveahead_fail(controller):
teleport_to_base_location(controller)
controller.step(dict(action="MoveAhead", moveMagnitude=5.0))
assert not controller.last_event.metadata["lastActionSuccess"]
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_jsonschema_metadata(controller):
event = controller.step(dict(action="Pass"))
with open(os.path.join(TESTS_DATA_DIR, "metadata-schema.json")) as f:
schema = json.loads(f.read())
jsonschema.validate(instance=event.metadata, schema=schema)
skip_reset(controller)
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_drone_jsonschema_metadata(controller):
controller.reset(agentMode="drone")
event = controller.step(action="Pass")
with open(os.path.join(TESTS_DATA_DIR, "drone-metadata-schema.json")) as f:
schema = json.loads(f.read())
jsonschema.validate(instance=event.metadata, schema=schema)
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_arm_jsonschema_metadata(controller):
controller.reset(agentMode="arm")
event = controller.step(action="Pass")
with open(os.path.join(TESTS_DATA_DIR, "arm-metadata-schema.json")) as f:
schema = json.loads(f.read())
jsonschema.validate(instance=event.metadata, schema=schema)
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_get_scenes_in_build(controller):
scenes = set()
for g in glob.glob("unity/Assets/Scenes/*.unity"):
# we currently ignore the 5xx scenes since they are not being worked on
if not re.match(r'^.*\/FloorPlan5[0-9]+_', g):
scenes.add(os.path.splitext(os.path.basename(g))[0])
event = controller.step(dict(action="GetScenesInBuild"), raise_for_failure=True)
return_scenes = set(event.metadata["actionReturn"])
# not testing for private scenes
diff = scenes - return_scenes
assert len(diff) == 0, "scenes in build diff: %s" % diff
skip_reset(controller)
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_get_reachable_positions(controller):
event = controller.step("GetReachablePositions")
assert (
event.metadata["actionReturn"] == event.metadata["reachablePositions"]
), "reachablePositions should map to actionReturn!"
assert len(event.metadata["reachablePositions"]) > 0 and isinstance(
event.metadata["reachablePositions"], list
), "reachablePositions/actionReturn should not be empty after calling GetReachablePositions!"
assert "reachablePositions" not in event.metadata.keys()
event = controller.step("Pass")
try:
event.metadata["reachablePositions"]
assert (
False
), "reachablePositions shouldn't be available without calling action='GetReachablePositions'."
except:
pass
def test_per_step_instance_segmentation(fifo_controller):
fifo_controller.reset(
TEST_SCENE, width=300, height=300, renderInstanceSegmentation=False
)
event = fifo_controller.step("RotateRight")
assert event.instance_segmentation_frame is None
event = fifo_controller.step("Pass", renderInstanceSegmentation=True)
assert event.instance_segmentation_frame is not None
# Test for Issue: 477
@pytest.mark.skip(reason="Winson is debugging why it fails.")
def test_change_resolution_image_synthesis(fifo_controller):
fifo_controller.reset(
TEST_SCENE,
width=300,
height=300,
renderInstanceSegmentation=True,
renderDepthImage=True,
renderSemanticSegmentation=True,
)
fifo_controller.step("RotateRight")
fifo_controller.step("RotateLeft")
fifo_controller.step("RotateRight")
first_event = fifo_controller.last_event
first_depth_frame = fifo_controller.last_event.depth_frame
first_instance_frame = fifo_controller.last_event.instance_segmentation_frame
first_sem_frame = fifo_controller.last_event.semantic_segmentation_frame
event = fifo_controller.step(action="ChangeResolution", x=500, y=500)
assert event.depth_frame.shape == (500, 500)
assert event.instance_segmentation_frame.shape == (500, 500, 3)
assert event.semantic_segmentation_frame.shape == (500, 500, 3)
event = fifo_controller.step(action="ChangeResolution", x=300, y=300)
assert event.depth_frame.shape == (300, 300)
assert event.instance_segmentation_frame.shape == (300, 300, 3)
assert event.semantic_segmentation_frame.shape == (300, 300, 3)
print("is none? {0} is none other {1} ".format( event.depth_frame is None, first_depth_frame is None))
save_image("depth_after_resolution_change_300_300.png", depth_to_gray_rgb(event.depth_frame))
save_image("before_after_resolution_change_300_300.png", depth_to_gray_rgb(first_depth_frame))
assert np.allclose(event.depth_frame, first_depth_frame, atol=0.001)
assert np.array_equal(event.instance_segmentation_frame, first_instance_frame)
assert np.array_equal(event.semantic_segmentation_frame, first_sem_frame)
assert first_event.color_to_object_id == event.color_to_object_id
assert first_event.object_id_to_color == event.object_id_to_color
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_change_resolution(controller):
event = controller.step(dict(action="Pass"), raise_for_failure=True)
assert event.frame.shape == (300, 300, 3)
event = controller.step(
dict(action="ChangeResolution", x=400, y=400), raise_for_failure=True
)
assert event.frame.shape == (400, 400, 3)
assert event.screen_width == 400
assert event.screen_height == 400
event = controller.step(
dict(action="ChangeResolution", x=300, y=300), raise_for_failure=True
)
@pytest.mark.parametrize("controller", fifo)
def test_teleport(controller):
# Checking y coordinate adjustment works
controller.step(
"TeleportFull", **{**BASE_FP28_LOCATION, "y": 0.95}, raise_for_failure=True
)
position = controller.last_event.metadata["agent"]["position"]
assert_near(position, BASE_FP28_POSITION)
controller.step(
"TeleportFull",
**{**BASE_FP28_LOCATION, "x": -2.0, "z": -2.5, "y": 0.95},
raise_for_failure=True,
)
position = controller.last_event.metadata["agent"]["position"]
assert_near(position, dict(x=-2.0, z=-2.5, y=0.901))
# Teleporting too high
before_position = controller.last_event.metadata["agent"]["position"]
controller.step(
"Teleport", **{**BASE_FP28_LOCATION, "y": 1.0},
)
    assert not controller.last_event.metadata[
        "lastActionSuccess"
    ], "Teleport should not allow changes of more than 0.05 in the y coordinate."
assert (
controller.last_event.metadata["agent"]["position"] == before_position
), "After failed teleport, the agent's position should not change."
# Teleporting into an object
controller.step(
"Teleport", **{**BASE_FP28_LOCATION, "z": -3.5},
)
assert not controller.last_event.metadata[
"lastActionSuccess"
], "Should not be able to teleport into an object."
# Teleporting into a wall
controller.step(
"Teleport", **{**BASE_FP28_LOCATION, "z": 0},
)
assert not controller.last_event.metadata[
"lastActionSuccess"
], "Should not be able to teleport into a wall."
# DEFAULT AGENT TEST
# make sure Teleport works with default args
a1 = controller.last_event.metadata["agent"]
a2 = controller.step("Teleport", horizon=10).metadata["agent"]
assert abs(a2["cameraHorizon"] - 10) < 1e-2, "cameraHorizon should be ~10!"
# all should be the same except for horizon
assert_near(a1["position"], a2["position"])
assert_near(a1["rotation"], a2["rotation"])
assert (
a1["isStanding"] == a2["isStanding"]
), "Agent should remain in same standing when unspecified!"
assert a1["isStanding"] != None, "Agent isStanding should be set for physics agent!"
# make sure float rotation works
# TODO: readd this when it actually works
# agent = controller.step('TeleportFull', rotation=25).metadata['agent']
# assert_near(agent['rotation']['y'], 25)
# test out of bounds with default agent
for action in ["Teleport", "TeleportFull"]:
try:
controller.step(
action="TeleportFull",
position=dict(x=2000, y=0, z=9000),
rotation=dict(x=0, y=90, z=0),
horizon=30,
raise_for_failure=True,
)
assert False, "Out of bounds teleport not caught by physics agent"
except:
pass
# Teleporting with the locobot and drone, which don't support standing
for agent in ["locobot", "drone"]:
event = controller.reset(agentMode=agent)
assert event.metadata["agent"]["isStanding"] is None, agent + " cannot stand!"
# Only degrees of freedom on the locobot
for action in ["Teleport", "TeleportFull"]:
event = controller.step(
action=action,
position=dict(x=-1.5, y=0.9, z=-1.5),
rotation=dict(x=0, y=90, z=0),
horizon=30,
)
assert event.metadata["lastActionSuccess"], (
agent + " must be able to TeleportFull without passing in standing!"
)
            raised = False
            try:
                event = controller.step(
                    action=action,
                    position=dict(x=-1.5, y=0.9, z=-1.5),
                    rotation=dict(x=0, y=90, z=0),
                    horizon=30,
                    standing=True,
                )
            except Exception:
                raised = True
            assert raised, (
                agent + " should not be able to pass in standing to teleport!"
            )
            # test out of bounds with the locobot/drone agent
            raised = False
            try:
                controller.step(
                    action=action,
                    position=dict(x=2000, y=0, z=9000),
                    rotation=dict(x=0, y=90, z=0),
                    horizon=30,
                    raise_for_failure=True,
                )
            except Exception:
                raised = True
            assert raised, "Out of bounds teleport not caught by " + agent + " agent"
# make sure Teleport works with default args
a1 = controller.last_event.metadata["agent"]
a2 = controller.step("Teleport", horizon=10).metadata["agent"]
assert abs(a2["cameraHorizon"] - 10) < 1e-2, "cameraHorizon should be ~10!"
# all should be the same except for horizon
assert_near(a1["position"], a2["position"])
assert_near(a1["rotation"], a2["rotation"])
# TODO: readd this when it actually works.
# make sure float rotation works
# if agent == "locobot":
# agent = controller.step('TeleportFull', rotation=25).metadata['agent']
# assert_near(agent['rotation']['y'], 25)
controller.reset(agentMode="default")
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_get_interactable_poses(controller):
fridgeId = next(
obj["objectId"]
for obj in controller.last_event.metadata["objects"]
if obj["objectType"] == "Fridge"
)
event = controller.step("GetInteractablePoses", objectId=fridgeId)
poses = event.metadata["actionReturn"]
    assert (
        600 > len(poses) > 400
    ), "Should have between 400 and 600 interactable poses next to the fridge!"
# teleport to a random pose
pose = poses[len(poses) // 2]
event = controller.step("TeleportFull", **pose)
# assumes 1 fridge in the scene
fridge = next(
obj
for obj in controller.last_event.metadata["objects"]
if obj["objectType"] == "Fridge"
)
assert fridge["visible"], "Object is not interactable!"
# tests that teleport correctly works with **syntax
assert (
abs(pose["x"] - event.metadata["agent"]["position"]["x"]) < 1e-3
), "Agent x position off!"
assert (
abs(pose["z"] - event.metadata["agent"]["position"]["z"]) < 1e-3
), "Agent z position off!"
assert (
abs(pose["rotation"] - event.metadata["agent"]["rotation"]["y"]) < 1e-3
), "Agent rotation off!"
assert (
abs(pose["horizon"] - event.metadata["agent"]["cameraHorizon"]) < 1e-3
), "Agent horizon off!"
assert (
pose["standing"] == event.metadata["agent"]["isStanding"]
), "Agent's isStanding is off!"
# potato should be inside of the fridge (and, thus, non interactable)
potatoId = next(
obj["objectId"]
for obj in controller.last_event.metadata["objects"]
if obj["objectType"] == "Potato"
)
event = controller.step("GetInteractablePoses", objectId=potatoId)
assert (
len(event.metadata["actionReturn"]) == 0
), "Potato is inside of fridge, and thus, shouldn't be interactable"
assert event.metadata[
"lastActionSuccess"
], "GetInteractablePoses with Potato shouldn't have failed!"
# assertion for maxPoses
event = controller.step("GetInteractablePoses", objectId=fridgeId, maxPoses=50)
assert len(event.metadata["actionReturn"]) == 50, "maxPoses should be capped at 50!"
# assert only checking certain horizons and rotations is working correctly
horizons = [0, 30]
rotations = [0, 45]
event = controller.step(
"GetInteractablePoses",
objectId=fridgeId,
horizons=horizons,
rotations=rotations,
)
for pose in event.metadata["actionReturn"]:
horizon_works = False
for horizon in horizons:
if abs(pose["horizon"] - horizon) < 1e-3:
horizon_works = True
break
assert horizon_works, "Not expecting horizon: " + pose["horizon"]
rotation_works = False
for rotation in rotations:
if abs(pose["rotation"] - rotation) < 1e-3:
rotation_works = True
break
assert rotation_works, "Not expecting rotation: " + pose["rotation"]
# assert only checking certain horizons and rotations is working correctly
event = controller.step("GetInteractablePoses", objectId=fridgeId, rotations=[270])
assert (
len(event.metadata["actionReturn"]) == 0
), "Fridge shouldn't be viewable from this rotation!"
assert event.metadata[
"lastActionSuccess"
], "GetInteractablePoses with Fridge shouldn't have failed!"
# test maxDistance
event = controller.step("GetInteractablePoses", objectId=fridgeId, maxDistance=5)
assert (
1300 > len(event.metadata["actionReturn"]) > 1100
), "GetInteractablePoses with large maxDistance is off!"
@pytest.mark.parametrize("controller", fifo)
def test_2d_semantic_hulls(controller):
from shapely.geometry import Polygon
controller.reset(TEST_SCENE)
obj_name_to_obj_id = {
o["name"]: o["objectId"] for o in controller.last_event.metadata["objects"]
}
# Used to save fixed object locations.
# with open("ai2thor/tests/data/floorplan28-fixed-obj-poses.json", "w") as f:
# json.dump(
# [
# {k: o[k] for k in ["name", "position", "rotation"]}
# for o in controller.last_event.metadata["objects"]
# ],
# f
# )
with open("ai2thor/tests/data/floorplan28-fixed-obj-poses.json", "r") as f:
fixed_obj_poses = json.load(f)
for o in fixed_obj_poses:
teleport_success = controller.step(
"TeleportObject",
objectId=obj_name_to_obj_id[o["name"]],
position=o["position"],
rotation=o["rotation"],
forceAction=True,
forceKinematic=True,
makeUnbreakable=True,
).metadata["lastActionSuccess"]
assert teleport_success
object_types = ["Tomato", "Drawer", "Fridge"]
object_ids = [
"Mug|-03.15|+00.82|-03.47",
"Faucet|-00.39|+00.93|-03.61",
"StoveBurner|-00.22|+00.92|-01.85",
]
def get_rounded_hulls(**kwargs):
if "objectId" in kwargs:
md = controller.step("Get2DSemanticHull", **kwargs).metadata
else:
md = controller.step("Get2DSemanticHulls", **kwargs).metadata
assert md["lastActionSuccess"] and md["errorMessage"] == ""
hulls = md["actionReturn"]
if isinstance(hulls, list):
return np.array(hulls, dtype=float).round(4).tolist()
else:
return {
k: np.array(v, dtype=float).round(4).tolist()
for k, v in md["actionReturn"].items()
}
# All objects
hulls_all = get_rounded_hulls()
# Filtering by object types
hulls_type_filtered = get_rounded_hulls(objectTypes=object_types)
# Filtering by object ids
hulls_id_filtered = get_rounded_hulls(objectIds=object_ids)
# Single object id
hulls_single_object = get_rounded_hulls(objectId=object_ids[0])
# Used to save the ground truth values:
# objects = controller.last_event.metadata["objects"]
# objects_poses = [
# {"objectName": o["name"], "position": o["position"], "rotation": o["rotation"]} for o in objects
# ]
# print(controller.step("SetObjectPoses", objectPoses=objects_poses).metadata)
# with open("ai2thor/tests/data/semantic-2d-hulls.json", "w") as f:
# json.dump(
# {
# "all": hulls_all,
# "type_filtered": hulls_type_filtered,
# "id_filtered": hulls_id_filtered,
# "single_object": hulls_single_object,
# },
# f
# )
with open("ai2thor/tests/data/semantic-2d-hulls.json") as f:
truth = json.load(f)
def assert_almost_equal(a, b):
if isinstance(a, list):
pa = Polygon(a)
pb = Polygon(b)
pa_area = pa.area
pb_area = pb.area
sym_diff_area = pa.symmetric_difference(pb).area
# TODO: There seems to be a difference in the geometry reported by Unity when in
# Linux vs Mac. I've had to increase the below check to the relatively generous <0.02
# to get this test to pass.
assert sym_diff_area / max([1e-6, pa_area, pb_area]) < 2e-2, (
f"Polygons have to large an area ({sym_diff_area}) in their symmetric difference"
f" compared to their sizes ({pa_area}, {pb_area}). Hulls:\n"
f"{json.dumps(a)}\n"
f"{json.dumps(b)}\n"
)
else:
for k in set(a.keys()) | set(b.keys()):
try:
assert_almost_equal(a[k], b[k])
except AssertionError as e:
raise AssertionError(f"For {k}: {e.args[0]}")
assert_almost_equal(truth["all"], hulls_all)
assert_almost_equal(truth["type_filtered"], hulls_type_filtered)
assert_almost_equal(truth["id_filtered"], hulls_id_filtered)
assert_almost_equal(truth["single_object"], hulls_single_object)
# Should fail when given types and ids
assert not controller.step(
"Get2DSemanticHulls", objectTypes=object_types, objectIds=object_ids
).metadata["lastActionSuccess"]
@pytest.mark.parametrize("controller", fifo_wsgi)
@pytest.mark.skip(reason="Colliders need to be moved closer to objects.")
def test_get_object_in_frame(controller):
controller.reset(scene=TEST_SCENE, agentMode="default")
event = controller.step(
action="TeleportFull",
position=dict(x=-1, y=0.900998235, z=-1.25),
rotation=dict(x=0, y=90, z=0),
horizon=0,
standing=True,
)
assert event, "TeleportFull should have succeeded!"
query = controller.step("GetObjectInFrame", x=0.6, y=0.6)
assert not query, "x=0.6, y=0.6 should fail!"
query = controller.step("GetObjectInFrame", x=0.6, y=0.4)
assert query.metadata["actionReturn"].startswith(
"Cabinet"
), "x=0.6, y=0.4 should have a cabinet!"
query = controller.step("GetObjectInFrame", x=0.3, y=0.5)
assert query.metadata["actionReturn"].startswith(
"Fridge"
), "x=0.3, y=0.5 should have a fridge!"
event = controller.reset(renderInstanceSegmentation=True)
assert event.metadata["screenHeight"] == 300
assert event.metadata["screenWidth"] == 300
# exhaustive test
num_tested = 0
for objectId in event.instance_masks.keys():
for obj in event.metadata["objects"]:
if obj["objectId"] == objectId:
break
else:
# object may not be a sim object (e.g., ceiling, floor, wall, etc.)
continue
num_tested += 1
mask = event.instance_masks[objectId]
# subtract 3 pixels off the edge due to pixels being rounded and collider issues
mask = Image.fromarray(mask)
for _ in range(3):
mask_edges = mask.filter(ImageFilter.FIND_EDGES)
mask = ImageChops.subtract(mask, mask_edges)
mask = np.array(mask)
ys, xs = mask.nonzero()
for x, y in zip(xs, ys):
event = controller.step(
action="GetObjectInFrame", x=x / 300, y=y / 300, forceAction=True
)
assert (
event.metadata["actionReturn"] == objectId
), f"Failed at ({x / 300}, {y / 300}) for {objectId} with agent at: {event.metadata['agent']}"
assert (
num_tested == 29
), "There should be 29 objects in the frame, based on the agent's pose!"
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_get_coordinate_from_raycast(controller):
controller.reset(scene=TEST_SCENE)
event = controller.step(
action="TeleportFull",
position=dict(x=-1.5, y=0.900998235, z=-1.5),
rotation=dict(x=0, y=90, z=0),
horizon=0,
standing=True,
)
assert event, "TeleportFull should have succeeded!"
for x, y in [(1.5, 0.5), (1.1, 0.3), (-0.1, 0.8), (-0.5, -0.3)]:
query = controller.step("GetCoordinateFromRaycast", x=x, y=y)
assert not query, f"x={x}, y={y} should fail!"
query = controller.step("GetCoordinateFromRaycast", x=0.5, y=0.5)
assert_near(
query.metadata["actionReturn"],
{"x": -0.344259053, "y": 1.57599819, "z": -1.49999917},
)
query = controller.step("GetCoordinateFromRaycast", x=0.5, y=0.2)
assert_near(
query.metadata["actionReturn"],
{"x": -0.344259053, "y": 2.2694428, "z": -1.49999917},
)
query = controller.step("GetCoordinateFromRaycast", x=0.25, y=0.5)
assert_near(
query.metadata["actionReturn"],
        {"x": -0.6037378311157227, "y": 1.575998306274414, "z": -1.0518686771392822},
)
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_get_reachable_positions_with_directions_relative_agent(controller):
controller.reset(TEST_SCENE)
event = controller.step("GetReachablePositions")
num_reachable_aligned = len(event.metadata["actionReturn"])
assert 100 < num_reachable_aligned < 125
controller.step(
action="TeleportFull",
position=dict(x=-1, y=0.900998235, z=-1.25),
rotation=dict(x=0, y=49.11111, z=0),
horizon=0,
standing=True,
)
event = controller.step("GetReachablePositions")
num_reachable_aligned_after_teleport = len(event.metadata["actionReturn"])
assert num_reachable_aligned == num_reachable_aligned_after_teleport
event = controller.step("GetReachablePositions", directionsRelativeAgent=True)
num_reachable_unaligned = len(event.metadata["actionReturn"])
assert 100 < num_reachable_unaligned < 125
assert (
num_reachable_unaligned != num_reachable_aligned
), "Number of reachable positions should differ when using `directionsRelativeAgent`"
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_manipulathor_move(controller):
start_position = {"x": -1.5, "y": 0.9009982347488403, "z": -1.5}
event = controller.reset(scene=TEST_SCENE, agentMode="arm", gridSize=0.25)
assert_near(
point1=start_position, point2=event.metadata["agent"]["position"],
)
event = controller.step(action="MoveAgent", ahead=0.25, right=0.15)
assert_near(
point1={"x": -1.649999976158142, "y": 0.9009982347488403, "z": -1.75},
point2=event.metadata["agent"]["position"],
)
event = controller.step(action="MoveAgent", ahead=-0.25, right=-0.15)
assert_near(
point1=start_position, point2=event.metadata["agent"]["position"],
)
event = controller.step(action="MoveRight")
assert_near(
point1={"x": -1.75, "y": 0.9009982347488403, "z": -1.5},
point2=event.metadata["agent"]["position"],
)
event = controller.step(action="MoveLeft")
assert_near(
point1=start_position, point2=event.metadata["agent"]["position"],
)
event = controller.step(action="MoveAhead")
assert_near(
point1={"x": -1.5, "y": 0.9009982347488403, "z": -1.75},
point2=event.metadata["agent"]["position"],
)
event = controller.step(action="MoveBack")
assert_near(
point1=start_position, point2=event.metadata["agent"]["position"],
)
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_manipulathor_rotate(controller):
event = controller.reset(scene=TEST_SCENE, agentMode="arm", rotateStepDegrees=90)
assert_near(
point1={"x": -0.0, "y": 180.0, "z": 0.0},
point2=event.metadata["agent"]["rotation"],
)
event = controller.step(action="RotateAgent", degrees=60)
assert_near(
point1={"x": -0.0, "y": 240.0, "z": 0.0},
point2=event.metadata["agent"]["rotation"],
)
event = controller.step(action="RotateRight")
assert_near(
point1={"x": -0.0, "y": 330.0, "z": 0.0},
point2=event.metadata["agent"]["rotation"],
)
event = controller.step(action="RotateLeft")
assert_near(
point1={"x": -0.0, "y": 240.0, "z": 0.0},
point2=event.metadata["agent"]["rotation"],
)
event = controller.step(action="RotateRight", degrees=60)
assert_near(
point1={"x": -0.0, "y": 300.0, "z": 0.0},
point2=event.metadata["agent"]["rotation"],
)
event = controller.step(action="RotateLeft", degrees=60)
assert_near(
point1={"x": -0.0, "y": 240.0, "z": 0.0},
point2=event.metadata["agent"]["rotation"],
)
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_unsupported_manipulathor(controller):
controller.reset(agentMode="arm")
event = controller.step(action="PickupObject", x=0.5, y=0.5)
assert not event, "PickupObject(x, y) should have failed with agentMode=arm"
objectId = next(
obj["objectId"] for obj in event.metadata["objects"] if obj["pickupable"]
)
event = controller.step(action="PickupObject", objectId=objectId, forceAction=True)
assert not event, "PickupObject(objectId) should have failed with agentMode=arm"
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_set_random_seed(controller):
orig_frame = controller.last_event.frame
controller.step(action="SetRandomSeed", seed=41)
s41_frame = controller.step(action="RandomizeMaterials").frame
controller.step(action="SetRandomSeed", seed=42)
s42_frame = controller.step(action="RandomizeMaterials").frame
images_far(s42_frame, s41_frame)
images_far(s42_frame, orig_frame)
images_far(s41_frame, orig_frame)
f1_1 = controller.reset().frame
f1_2 = controller.step(action="SetRandomSeed", seed=42).frame
f1_3 = controller.step(action="RandomizeMaterials").frame
f2_1 = controller.reset().frame
f2_2 = controller.step(action="SetRandomSeed", seed=42).frame
f2_3 = controller.step(action="RandomizeMaterials").frame
images_near(f1_1, f2_1)
images_near(f1_1, f1_2)
images_near(f2_1, f2_2)
images_near(f1_3, f2_3)
images_far(f2_1, f2_3)
images_far(f1_1, f1_3)
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_randomize_materials_scenes(controller):
for p in [0, 200, 300, 400]:
controller.reset(scene=f"FloorPlan{p + 20}")
meta = controller.step("RandomizeMaterials").metadata["actionReturn"]
assert meta["useTrainMaterials"]
assert meta["useExternalMaterials"]
assert not meta["useValMaterials"]
assert not meta["useTestMaterials"]
controller.reset(scene=f"FloorPlan{p + 21}")
meta = controller.step("RandomizeMaterials").metadata["actionReturn"]
assert not meta["useTrainMaterials"]
assert not meta["useExternalMaterials"]
assert meta["useValMaterials"]
assert not meta["useTestMaterials"]
controller.reset(scene=f"FloorPlan{p + 25}")
meta = controller.step("RandomizeMaterials").metadata["actionReturn"]
assert not meta["useTrainMaterials"]
assert not meta["useExternalMaterials"]
assert meta["useValMaterials"]
assert not meta["useTestMaterials"]
controller.reset(scene=f"FloorPlan{p + 26}")
meta = controller.step("RandomizeMaterials").metadata["actionReturn"]
assert not meta["useTrainMaterials"]
assert not meta["useExternalMaterials"]
assert not meta["useValMaterials"]
assert meta["useTestMaterials"]
controller.reset(scene=f"FloorPlan_Train5_3")
meta = controller.step("RandomizeMaterials").metadata["actionReturn"]
assert meta["useTrainMaterials"]
assert meta["useExternalMaterials"]
assert not meta["useValMaterials"]
assert not meta["useTestMaterials"]
controller.reset(scene=f"FloorPlan_Val2_1")
meta = controller.step("RandomizeMaterials").metadata["actionReturn"]
assert not meta["useTrainMaterials"]
assert not meta["useExternalMaterials"]
assert meta["useValMaterials"]
assert not meta["useTestMaterials"]
controller.step(action="ResetMaterials")
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_randomize_materials_clearOnReset(controller):
f1 = controller.last_event.frame.astype(np.float16)
f2 = controller.step(action="RandomizeMaterials").frame.astype(np.float16)
f3 = controller.reset().frame.astype(np.float16)
    # giving some leeway with 0.05, but that as a baseline should be plenty
assert (
np.abs(f1 - f2).flatten() / 255
).sum() / 300 / 300 > 0.05, "Expected material change"
assert (
np.abs(f2 - f3).flatten() / 255
).sum() / 300 / 300 > 0.05, "Expected material change"
assert (
np.abs(f1 - f3).flatten() / 255
).sum() / 300 / 300 < 0.01, "Materials should look the same"
f1 = controller.reset().frame.astype(np.float16)
f2 = controller.step(action="RandomizeMaterials").frame.astype(np.float16)
f3 = controller.step(action="ResetMaterials").frame.astype(np.float16)
assert (
np.abs(f1 - f2).flatten() / 255
).sum() / 300 / 300 > 0.05, "Expected material change"
assert (
np.abs(f2 - f3).flatten() / 255
).sum() / 300 / 300 > 0.05, "Expected material change"
assert (
np.abs(f1 - f3).flatten() / 255
).sum() / 300 / 300 < 0.01, "Materials should look the same"
@pytest.mark.parametrize("controller", fifo)
def test_directionalPush(controller):
positions = []
for angle in [0, 90, 180, 270, -1, -90]:
controller.reset(scene="FloorPlan28")
start = controller.step(
action="TeleportFull",
position=dict(x=-3.25, y=0.9, z=-1.25),
rotation=dict(x=0, y=0, z=0),
horizon=30,
standing=True,
)
# z increases
end = controller.step(
action="DirectionalPush",
pushAngle=angle,
objectId="Tomato|-03.13|+00.92|-00.39",
moveMagnitude=25,
)
start_obj = next(
obj for obj in start.metadata["objects"] if obj["objectType"] == "Tomato"
)
end_obj = next(
obj for obj in end.metadata["objects"] if obj["objectType"] == "Tomato"
)
positions.append((start_obj["position"], end_obj["position"]))
assert positions[0][1]["z"] - positions[0][0]["z"] > 0.2
assert positions[4][1]["z"] - positions[4][0]["z"] > 0.2
assert positions[1][1]["x"] - positions[1][0]["x"] > 0.2
assert positions[2][1]["z"] - positions[2][0]["z"] < -0.2
assert positions[3][1]["x"] - positions[3][0]["x"] < -0.2
assert positions[5][1]["x"] - positions[5][0]["x"] < -0.2
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_randomize_materials_params(controller):
controller.reset(scene="FloorPlan15")
meta = controller.step(
action="RandomizeMaterials",
useTrainMaterials=True,
useValMaterials=True,
useTestMaterials=True,
useExternalMaterials=False,
).metadata["actionReturn"]
assert meta["useTrainMaterials"]
assert not meta["useExternalMaterials"]
assert meta["useValMaterials"]
assert meta["useTestMaterials"]
assert not controller.step(action="RandomizeMaterials", useTrainMaterials=False)
assert controller.step(action="RandomizeMaterials", inRoomTypes=["Kitchen"])
assert controller.step(
action="RandomizeMaterials", inRoomTypes=["Kitchen", "LivingRoom"],
)
assert not controller.step(action="RandomizeMaterials", inRoomTypes=["LivingRoom"])
assert not controller.step(action="RandomizeMaterials", inRoomTypes=["RoboTHOR"])
controller.reset(scene="FloorPlan_Train5_2")
assert not controller.step(
action="RandomizeMaterials", inRoomTypes=["Kitchen", "LivingRoom"],
)
assert not controller.step(action="RandomizeMaterials", inRoomTypes=["LivingRoom"])
assert controller.step(action="RandomizeMaterials", inRoomTypes=["RoboTHOR"])
controller.reset(scene="FloorPlan_Val3_2")
assert not controller.step(
action="RandomizeMaterials", inRoomTypes=["Kitchen", "LivingRoom"],
)
assert not controller.step(action="RandomizeMaterials", inRoomTypes=["LivingRoom"])
assert controller.step(action="RandomizeMaterials", inRoomTypes=["RoboTHOR"])
controller.step(action="ResetMaterials")
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_invalid_arguments(controller):
with pytest.raises(ValueError):
event = controller.step(
action="PutObject",
x=0.0,
y=0.0,
z=1.0,
forceAction=False,
placeStationary=True,
)
print("Err {0}".format(controller.last_event.metadata["lastActionSuccess"]))
assert not controller.last_event.metadata[
"lastActionSuccess"
], "Extra parameter 'z' in action"
assert controller.last_event.metadata[
"errorMessage"
], "errorMessage with invalid argument"
@pytest.mark.parametrize("controller", fifo)
def test_drop_object(controller):
for action in ["DropHeldObject", "DropHandObject"]:
assert not controller.last_event.metadata["inventoryObjects"]
controller.step(
action="PickupObject",
objectId="SoapBottle|-00.84|+00.93|-03.76",
forceAction=True,
)
assert (
controller.last_event.metadata["inventoryObjects"][0]["objectId"]
== "SoapBottle|-00.84|+00.93|-03.76"
)
controller.step(action=action)
assert not controller.last_event.metadata["inventoryObjects"]
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_segmentation_colors(controller):
event = controller.reset(renderSemanticSegmentation=True)
fridge_color = event.object_id_to_color["Fridge"]
assert (
event.color_to_object_id[fridge_color] == "Fridge"
), "Fridge should have this color semantic seg"
event = controller.reset(
renderSemanticSegmentation=False, renderInstanceSegmentation=True
)
fridge_color = event.object_id_to_color["Fridge"]
assert (
event.color_to_object_id[fridge_color] == "Fridge"
), "Fridge should have this color on instance seg"
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_move_hand(controller):
h1 = controller.step(
action="PickupObject",
objectId="SoapBottle|-00.84|+00.93|-03.76",
forceAction=True,
).metadata["heldObjectPose"]
h2 = controller.step(action="MoveHeldObject", ahead=0.1).metadata["heldObjectPose"]
assert_near(h1["rotation"], h2["rotation"])
assert (
0.1 - 1e-3 <= h2["localPosition"]["z"] - h1["localPosition"]["z"] <= 0.1 + 1e-3
)
assert abs(h2["localPosition"]["y"] - h1["localPosition"]["y"]) < 1e-3
assert abs(h2["localPosition"]["x"] - h1["localPosition"]["x"]) < 1e-3
controller.reset()
h1 = controller.step(
action="PickupObject",
objectId="SoapBottle|-00.84|+00.93|-03.76",
forceAction=True,
).metadata["heldObjectPose"]
h2 = controller.step(
action="MoveHeldObject", ahead=0.1, right=0.1, up=0.1
).metadata["heldObjectPose"]
assert_near(h1["rotation"], h2["rotation"])
assert (
0.1 - 1e-3 <= h2["localPosition"]["z"] - h1["localPosition"]["z"] <= 0.1 + 1e-3
)
assert (
0.1 - 1e-3 <= h2["localPosition"]["x"] - h1["localPosition"]["x"] <= 0.1 + 1e-3
)
assert (
0.1 - 1e-3 <= h2["localPosition"]["y"] - h1["localPosition"]["y"] <= 0.1 + 1e-3
)
controller.reset()
h1 = controller.step(
action="PickupObject",
objectId="SoapBottle|-00.84|+00.93|-03.76",
forceAction=True,
).metadata["heldObjectPose"]
h2 = controller.step(
action="MoveHeldObject", ahead=0.1, right=0.05, up=-0.1
).metadata["heldObjectPose"]
assert_near(h1["rotation"], h2["rotation"])
assert (
0.1 - 1e-3 <= h2["localPosition"]["z"] - h1["localPosition"]["z"] <= 0.1 + 1e-3
)
assert (
0.05 - 1e-3
<= h2["localPosition"]["x"] - h1["localPosition"]["x"]
<= 0.05 + 1e-3
)
assert (
-0.1 - 1e-3
<= h2["localPosition"]["y"] - h1["localPosition"]["y"]
<= -0.1 + 1e-3
)
@pytest.mark.parametrize("controller", fifo)
def test_rotate_hand(controller):
# Tests RotateHeldObject and that event.metadata["hand"] is equivalent to
# event.metadata["heldObjectPose"] for backwards compatibility purposes
# PITCH
h1 = controller.step(
action="PickupObject",
objectId="SoapBottle|-00.84|+00.93|-03.76",
forceAction=True,
).metadata["heldObjectPose"]
h2 = controller.step(action="RotateHeldObject", pitch=90).metadata["hand"]
assert_near(h1["position"], h2["position"])
assert h2["rotation"]["x"] - h1["rotation"]["x"] == 90
assert h2["rotation"]["y"] == h1["rotation"]["y"]
assert h2["rotation"]["z"] == h1["rotation"]["z"]
# YAW
controller.reset()
h1 = controller.step(
action="PickupObject",
objectId="SoapBottle|-00.84|+00.93|-03.76",
forceAction=True,
).metadata["heldObjectPose"]
h2 = controller.step(action="RotateHeldObject", yaw=90).metadata["hand"]
assert_near(h1["position"], h2["position"])
assert h2["rotation"]["y"] - h1["rotation"]["y"] == 90
assert h2["rotation"]["x"] == h1["rotation"]["x"]
assert h2["rotation"]["z"] == h1["rotation"]["z"]
# ROLL
controller.reset()
h1 = controller.step(
action="PickupObject",
objectId="SoapBottle|-00.84|+00.93|-03.76",
forceAction=True,
).metadata["heldObjectPose"]
h2 = controller.step(action="RotateHeldObject", roll=90).metadata["hand"]
assert_near(h1["position"], h2["position"])
# NOTE: 270 is expected if you want roll to be positive moving rightward
assert h2["rotation"]["z"] - h1["rotation"]["z"] == 270
assert h2["rotation"]["x"] == h1["rotation"]["x"]
assert h2["rotation"]["y"] == h1["rotation"]["y"]
# ROLL + PITCH + YAW
controller.reset()
h1 = controller.step(
action="PickupObject",
objectId="SoapBottle|-00.84|+00.93|-03.76",
forceAction=True,
).metadata["heldObjectPose"]
h2 = controller.step(action="RotateHeldObject", roll=90, pitch=90, yaw=90).metadata[
"hand"
]
assert_near(h1["position"], h2["position"])
assert_near(h1["rotation"], dict(x=0, y=180, z=0))
# Unity will normalize the rotation, so x=90, y=270, and z=270 becomes x=90, y=0, z=0
assert_near(h2["rotation"], dict(x=90, y=0, z=0))
# local rotation test
controller.reset()
h1 = controller.step(
action="PickupObject",
objectId="SoapBottle|-00.84|+00.93|-03.76",
forceAction=True,
).metadata["hand"]
h2 = controller.step(
action="RotateHeldObject", rotation=dict(x=90, y=180, z=0)
).metadata["hand"]
assert_near(h1["position"], h2["position"])
assert_near(h1["localRotation"], dict(x=0, y=0, z=0))
assert_near(h2["localRotation"], dict(x=90, y=180, z=0))
def test_settle_physics(fifo_controller):
from dictdiffer import diff
fifo_controller.reset(agentMode="arm")
for i in range(30):
fifo_controller.step("AdvancePhysicsStep", raise_for_failure=True)
    first_objs = {
        o["objectId"]: o for o in fifo_controller.last_event.metadata["objects"]
    }
for i in range(30):
fifo_controller.step("AdvancePhysicsStep", raise_for_failure=True)
diffs = []
    last_objs = {
        o["objectId"]: o for o in fifo_controller.last_event.metadata["objects"]
    }
for object_id, object_metadata in first_objs.items():
        for d in diff(
            object_metadata,
            last_objs.get(object_id, {}),
            tolerance=0.00001,
            ignore=set(["receptacleObjectIds"]),
        ):
            diffs.append((object_id, d))
assert diffs == []
@pytest.mark.parametrize("controller", fifo_wsgi)
def test_fill_liquid(controller):
pot = next(
obj
for obj in controller.last_event.metadata["objects"]
if obj["objectId"] == "Pot|-00.61|+00.80|-03.42"
)
assert pot["fillLiquid"] is None
assert not pot["isFilledWithLiquid"]
assert pot["canFillWithLiquid"]
for fillLiquid in ["water", "wine", "coffee"]:
controller.step(
action="FillObjectWithLiquid",
fillLiquid=fillLiquid,
objectId="Pot|-00.61|+00.80|-03.42",
forceAction=True,
)
pot = next(
obj
for obj in controller.last_event.metadata["objects"]
if obj["objectId"] == "Pot|-00.61|+00.80|-03.42"
)
assert pot["fillLiquid"] == fillLiquid
assert pot["isFilledWithLiquid"]
assert pot["canFillWithLiquid"]
controller.step(
action="EmptyLiquidFromObject",
objectId="Pot|-00.61|+00.80|-03.42",
forceAction=True,
)
pot = next(
obj
for obj in controller.last_event.metadata["objects"]
if obj["objectId"] == "Pot|-00.61|+00.80|-03.42"
)
assert pot["fillLiquid"] is None
assert not pot["isFilledWithLiquid"]
assert pot["canFillWithLiquid"]
def test_timeout():
kwargs = {
"server_timeout": 2.0
}
for c in [
build_controller(server_class=WsgiServer, **kwargs),
build_controller(server_class=FifoServer, **kwargs)
]:
c.step("Sleep", seconds=1)
assert c.last_event.metadata["lastActionSuccess"]
with pytest.raises(TimeoutError):
c.step("Sleep", seconds=4)
# Above crash should kill the unity process
time.sleep(1.0)
assert c.server.unity_proc.poll() is not None
| ai2thor-main | ai2thor/tests/test_unity.py |
# import pytest
import json
import os
import cv2
import numpy as np
import pytest
from ai2thor.fifo_server import FifoServer
from ai2thor.wsgi_server import WsgiServer
from .test_unity import build_controller, depth_images_near, images_near
DATA_PATH = "ai2thor/tests/data/"
IMAGE_FOLDER_PATH = os.path.join(DATA_PATH, "procedural")
SCENE = "Procedural"
shared_args = dict(
scene="Procedural",
gridSize=0.25,
port=8200,
width=300,
height=300,
fieldOfView=45,
agentCount=1,
)
_wsgi_controller = dict(server_class=WsgiServer, **shared_args)
_fifo_controller = dict(server_class=FifoServer, **shared_args)
fifo_wsgi = [_fifo_controller, _wsgi_controller]
wsgi = [_wsgi_controller]
fifo = [_fifo_controller]
def create_pixel_diff_image(img, g_truth):
dx = np.where(~np.all(g_truth == img, axis=-1))
copy = img.copy()
copy[dx] = (255, 0, 255)
return copy
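# A rough sketch of how this template seems to be interpreted (inferred from the keys
# defined below, not from the Procedural docs): each character in "layout" and
# "objectsLayouts" is one grid cell, "1"/"2" index into "rooms", "=" marks a hole
# (here a Doorway_1 asset), "+" places the object keyed in "objects" (here a
# kinematic chair), and "0" presumably marks an empty/exterior cell.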
house_template = {
"id": "house_0",
"layout": """
0 0 0 0 0 0
0 2 2 2 2 0
0 2 2 2 2 0
0 1 1 1 1 0
0 1 1 1 1 0
0 0 0 0 0 0
""",
"objectsLayouts": [
"""
0 0 0 0 0 0
0 2 2 2 2 0
0 2 2 2 = 0
0 1 1 1 = 0
0 1 1 1 + 0
0 0 0 0 0 0
"""
],
"rooms": {
"1": {
"wallTemplate": {
"material": {
"unlit": False,
"color": {"r": 1.0, "g": 0.0, "b": 0.0, "a": 1.0},
}
},
"floorTemplate": {
"roomType": "Bedroom",
"floorMaterial": {"name": "DarkWoodFloors"},
},
"floorYPosition": 0.0,
"wallHeight": 3.0,
},
"2": {
"wallTemplate": {
"material": {
"unlit": False,
"color": {"r": 0.0, "g": 0.0, "b": 1.0, "a": 1.0},
}
},
"floorTemplate": {
"roomType": "LivingRoom",
"floorMaterial": {"name": "RedBrick"},
},
"floorYPosition": 0.0,
"wallHeight": 3.0,
},
},
"holes": {"=": {"room0": "1", "openness": 1.0, "assetId": "Doorway_1"}},
"objects": {"+": {"kinematic": True, "assetId": "Chair_007_1"}},
"proceduralParameters": {
"floorColliderThickness": 1.0,
"receptacleHeight": 0.7,
"skyboxId": "Sky1",
"ceilingMaterial": {"name": "ps_mat"},
},
"metadata": {"schema": "1.0.0"},
}
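# The tests below first expand this template into a concrete house specification via
# GetHouseFromTemplate, then build it with CreateHouse, teleport the agent, and compare
# the rendered RGB/depth frames against the stored ground-truth files in
# ai2thor/tests/data/procedural.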
# TODO rendering is different for fifo and wsgi server
@pytest.mark.parametrize("controller_args", fifo)
def test_render_lit(controller_args):
print("Args")
print(controller_args)
controller = build_controller(**controller_args)
rgb_filename = "proc_rgb_lit_fifo.png"
ground_truth = cv2.imread(os.path.join(IMAGE_FOLDER_PATH, rgb_filename))
evt = controller.step(action="GetHouseFromTemplate", template=house_template)
print(
"Action success {0}, message {1}".format(
evt.metadata["lastActionSuccess"], evt.metadata["errorMessage"]
)
)
assert evt.metadata["lastActionSuccess"], evt.metadata["errorMessage"]
house = evt.metadata["actionReturn"]
with open("test_render_lit.json", "w") as f:
print(house)
json.dump(house, f)
evt = controller.step(action="CreateHouse", house=house)
print(
"Action success {0}, message {1}".format(
evt.metadata["lastActionSuccess"], evt.metadata["errorMessage"]
)
)
assert evt.metadata["lastActionSuccess"]
evt = controller.step(
action="TeleportFull",
x=3.0,
y=0.9010001,
z=1.0,
rotation=dict(x=0, y=0, z=0),
horizon=0,
standing=True,
forceAction=True,
)
controller.stop()
assert images_near(
evt.cv2img, ground_truth, max_mean_pixel_diff=52, debug_save=True
)
#
# @pytest.mark.parametrize("controller_args", wsgi)
# def test_render_lit_2(controller_args):
# rgb_filename = "proc_rgb_lit.png"
# ground_truth = cv2.imread(os.path.join(IMAGE_FOLDER_PATH, rgb_filename))
# rgb_filename = "proc_rgb_lit_server.png"
# server_image = cv2.imread(os.path.join(IMAGE_FOLDER_PATH, rgb_filename))
# assert images_near(server_image, ground_truth, max_mean_pixel_diff=8, debug_save=True)
#
#
# @pytest.mark.parametrize("controller_args", wsgi)
# def test_render_depth_2(controller_args):
# depth_filename = "proc_depth.npy"
# raw_depth = np.load(os.path.join(IMAGE_FOLDER_PATH, depth_filename))
# depth_filename = "proc_depth_server.npy"
# server_image = np.load(os.path.join(IMAGE_FOLDER_PATH, depth_filename))
# print("HIIII")
# assert depth_images_near(server_image, raw_depth, epsilon=2e-1, debug_save=True)
@pytest.mark.parametrize("controller_args", fifo)
def test_depth(controller_args):
controller_args.update(
renderDepthImage=True,
)
controller = build_controller(**controller_args)
depth_filename = "proc_depth.npy"
raw_depth = np.load(os.path.join(IMAGE_FOLDER_PATH, depth_filename))
evt = controller.step(action="GetHouseFromTemplate", template=house_template)
assert evt.metadata["lastActionSuccess"], evt.metadata["errorMessage"]
house = evt.metadata["actionReturn"]
evt = controller.step(action="CreateHouse", house=house)
print(
"Action success {0}, message {1}".format(
evt.metadata["lastActionSuccess"], evt.metadata["errorMessage"]
)
)
assert evt.metadata["lastActionSuccess"]
evt = controller.step(
action="TeleportFull",
x=3.0,
y=0.9010001,
z=1.0,
rotation=dict(x=0, y=0, z=0),
horizon=0,
standing=True,
forceAction=True,
)
controller.stop()
assert depth_images_near(evt.depth_frame, raw_depth, epsilon=1e-1, debug_save=True)
| ai2thor-main | ai2thor/tests/test_unity_procedural.py |
import ai2thor.controller
from ai2thor.server import Event
from ai2thor.platform import CloudRendering, Linux64
import pytest
import numpy as np
import warnings
import os
import math
def fake_linux64_exists(self):
if self.platform.name() == "Linux64":
return True
else:
return False
@classmethod
def fake_invalid_cr_validate(cls, request):
return ["Missing libvulkan1."]
@classmethod
def fake_invalid_linux64_validate(cls, request):
return ["No display found. "]
def fake_cr_exists(self):
if self.platform.name() == "CloudRendering":
return True
else:
return False
def fake_not_exists(self):
return False
def fake_find_platform_builds(
    self, candidate_platforms, request, commits, releases_dir, local_build
):
return []
def fake_exists(self):
return True
def fake_linux_system():
return "Linux"
def fake_darwin_system():
return "Darwin"
def noop_download(self):
pass
def select_platforms_linux_cr(request):
return (Linux64, CloudRendering)
def select_platforms_cr(request):
return (CloudRendering, )
@classmethod
def fake_validate(cls, request):
return []
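# The fakes below let Controller.step() be exercised without a Unity process: tests
# swap controller.server for a FakeServer, put a pre-built Event on request_queue, and
# then read the action dict that would have been sent to the backend off of
# response_queue (see test_fix_visibility_distance_env for an example).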
class FakeServer(object):
def __init__(self):
self.request_queue = FakeQueue()
self.response_queue = FakeQueue()
def send(self, action):
assert self.request_queue.empty()
self.response_queue.put_nowait(action)
def receive(self):
return self.request_queue.get(False, 0)
class FakeQueue(object):
def __init__(self):
self.value = None
def put_nowait(self, v):
assert self.value is None
self.value = v
def get(self, block=False, timeout=0):
v = self.value
self.value = None
return v
    # always report empty so that FakeServer.send's request_queue.empty() assertion passes
def empty(self):
return True
def controller(**args):
# delete display so the tests can run on Linux
if "DISPLAY" in os.environ:
del os.environ["DISPLAY"]
# during a ci-build we will get a warning that we are using a commit_id for the
# build instead of 'local'
default_args = dict(download_only=True)
default_args.update(args)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
c = ai2thor.controller.Controller(**default_args)
c.server = FakeServer()
return c
def test_osx_build_missing(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.Controller.find_platform_builds", fake_find_platform_builds)
with pytest.raises(Exception) as ex:
c = controller()
assert str(ex.value).startswith("No build exists for arch=Darwin platforms=OSXIntel64 and commits:")
def test_osx_build_invalid_commit_id(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_not_exists)
fake_commit_id = "1234567TEST"
with pytest.raises(ValueError) as ex:
c = controller(commit_id=fake_commit_id)
assert (
str(ex.value)
== "Invalid commit_id: %s - no build exists for arch=Darwin platforms=OSXIntel64" % fake_commit_id
)
def test_osx_build(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id)
assert c._build.platform.name() == "OSXIntel64"
assert c._build.commit_id == fake_commit_id
def test_linux_explicit_xdisplay(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch("ai2thor.controller.ai2thor.platform.Linux64.validate", fake_validate)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id, x_display="75.9")
assert c._build.platform.name() == "Linux64"
assert c._build.commit_id == fake_commit_id
def test_linux_invalid_linux64_invalid_cr(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch("ai2thor.controller.ai2thor.platform.select_platforms", select_platforms_linux_cr)
mocker.patch(
"ai2thor.controller.ai2thor.platform.CloudRendering.validate",
fake_invalid_cr_validate,
)
mocker.patch(
"ai2thor.controller.ai2thor.platform.Linux64.validate",
fake_invalid_linux64_validate,
)
fake_commit_id = "1234567TEST"
with pytest.raises(Exception) as excinfo:
c = controller(commit_id=fake_commit_id)
assert str(excinfo.value).startswith(
"The following builds were found, but had missing dependencies. Only one valid platform is required to run AI2-THOR."
)
def test_linux_invalid_linux64_valid_cr(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch("ai2thor.controller.ai2thor.platform.select_platforms", select_platforms_linux_cr)
mocker.patch(
"ai2thor.controller.ai2thor.platform.CloudRendering.validate", fake_validate
)
mocker.patch(
"ai2thor.controller.ai2thor.platform.Linux64.validate",
fake_invalid_linux64_validate,
)
mocker.patch("ai2thor.platform.CloudRendering.enabled", True)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id)
assert c._build.platform.name() == "CloudRendering"
assert c._build.commit_id == fake_commit_id
def test_linux_valid_linux64_valid_cloudrendering(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch(
"ai2thor.controller.ai2thor.platform.CloudRendering.validate", fake_validate
)
mocker.patch("ai2thor.controller.ai2thor.platform.Linux64.validate", fake_validate)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id)
assert c._build.platform.name() == "Linux64"
assert c._build.commit_id == fake_commit_id
def test_linux_valid_linux64_valid_cloudrendering_enabled_cr(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch("ai2thor.controller.ai2thor.platform.select_platforms", select_platforms_cr)
mocker.patch(
"ai2thor.controller.ai2thor.platform.CloudRendering.validate", fake_validate
)
mocker.patch("ai2thor.controller.ai2thor.platform.Linux64.validate", fake_validate)
mocker.patch("ai2thor.platform.CloudRendering.enabled", True)
mocker.patch("ai2thor.platform.Linux64.enabled", False)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id)
assert c._build.platform.name() == "CloudRendering"
assert c._build.commit_id == fake_commit_id
def test_linux_valid_linux64_invalid_cloudrendering(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch(
"ai2thor.controller.ai2thor.platform.CloudRendering.validate",
fake_invalid_cr_validate,
)
mocker.patch("ai2thor.controller.ai2thor.platform.Linux64.validate", fake_validate)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id)
assert c._build.platform.name() == "Linux64"
assert c._build.commit_id == fake_commit_id
def test_linux_missing_linux64(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_cr_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch(
"ai2thor.controller.ai2thor.platform.CloudRendering.validate", fake_validate
)
mocker.patch("ai2thor.platform.CloudRendering.enabled", True)
mocker.patch("ai2thor.controller.ai2thor.platform.select_platforms", select_platforms_linux_cr)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id)
assert c._build.platform.name() == "CloudRendering"
assert c._build.commit_id == fake_commit_id
def test_linux_missing_cloudrendering(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_linux64_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch("ai2thor.controller.ai2thor.platform.Linux64.validate", fake_validate)
fake_commit_id = "1234567TEST"
c = controller(commit_id=fake_commit_id)
assert c._build.platform.name() == "Linux64"
assert c._build.commit_id == fake_commit_id
def test_distance():
point1 = dict(x=1.5, z=2.5)
point2 = dict(x=4.33, z=7.5)
point3 = dict(x=2.5, z=3.5)
assert ai2thor.controller.distance(point1, point2) == 5.745337239884183
assert ai2thor.controller.distance(point1, point1) == 0.0
assert ai2thor.controller.distance(point1, point3) == math.sqrt(2.0)
def test_key_for_point():
assert ai2thor.controller.key_for_point(2.567, -3.43) == "2.6 -3.4"
def test_invalid_commit(mocker):
caught_exception = False
try:
c = ai2thor.controller.Controller(commit_id="1234567x")
except ValueError as e:
caught_exception = True
assert caught_exception, "invalid commit id should throw ValueError"
def test_scene_names(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
c = controller()
assert len(c.scene_names()) == 195
assert len(c.ithor_scenes()) == 120
assert len(c.robothor_scenes()) == 195 - 120
def test_invalid_action(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
fake_event = Event(
dict(
screenWidth=300,
screenHeight=300,
colors=[],
lastActionSuccess=False,
errorCode="InvalidAction",
errorMessage="Invalid method: moveaheadbadmethod",
)
)
c = controller()
c.last_event = fake_event
action1 = dict(action="MoveaheadbadMethod")
c.server.request_queue.put_nowait(fake_event)
with pytest.raises(ValueError) as excinfo:
c.step(action1, raise_for_failure=True)
assert excinfo.value.args == ("Invalid method: moveaheadbadmethod",)
def test_fix_visibility_distance_env(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
try:
os.environ["AI2THOR_VISIBILITY_DISTANCE"] = "2.0"
fake_event = Event(
dict(screenWidth=300, screenHeight=300, colors=[], lastActionSuccess=True)
)
c = controller()
c.last_event = fake_event
action1 = dict(action="Initialize", gridSize=0.25)
c.server.request_queue.put_nowait(fake_event)
c.step(action1)
filtered_action = c.server.response_queue.get()
assert filtered_action == {
"action": "Initialize",
"gridSize": 0.25,
"visibilityDistance": 2.0,
}
finally:
del os.environ["AI2THOR_VISIBILITY_DISTANCE"]
def test_raise_for_failure(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
fake_event = Event(
dict(
screenWidth=300,
screenHeight=300,
colors=[],
lastActionSuccess=False,
errorCode="NotOpen",
)
)
c = controller()
c.last_event = fake_event
action1 = dict(action="MoveAhead")
c.server.request_queue.put_nowait(fake_event)
with pytest.raises(RuntimeError):
c.step(action1, raise_for_failure=True)
def test_failure(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
fake_event = Event(
dict(
screenWidth=300,
screenHeight=300,
colors=[],
lastActionSuccess=False,
errorCode="NotOpen",
)
)
c = controller()
c.last_event = fake_event
action1 = dict(action="MoveAhead")
c.server.request_queue.put_nowait(fake_event)
e = c.step(action1)
assert c.last_action == action1
assert not e.metadata["lastActionSuccess"]
def test_last_action(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_darwin_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
fake_event = Event(
dict(screenWidth=300, screenHeight=300, colors=[], lastActionSuccess=True)
)
c = controller()
c.last_event = fake_event
action1 = dict(action="RotateRight")
c.server.request_queue.put_nowait(fake_event)
e = c.step(action1)
assert c.last_action == action1
assert e.metadata["lastActionSuccess"]
c = controller()
c.last_event = fake_event
action2 = dict(action="RotateLeft")
c.server.request_queue.put_nowait(fake_event)
e = c.step(action2)
assert c.last_action == action2
assert e.metadata["lastActionSuccess"]
def test_unity_command_force_device_index(mocker):
pass
    # TODO: this test is no longer valid as the mapping between Vulkan device indices
    # and CUDA devices is more arbitrary than we first believed.
    # We should find a way to test this in a more robust way.
# mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
# mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
# mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
# mocker.patch("ai2thor.controller.ai2thor.platform.select_platforms", select_platforms_linux_cr)
# mocker.patch(
# "ai2thor.controller.ai2thor.platform.CloudRendering.validate", fake_validate
# )
# mocker.patch(
# "ai2thor.controller.ai2thor.platform.Linux64.validate",
# fake_invalid_linux64_validate,
# )
#
# mocker.patch("ai2thor.platform.CloudRendering.enabled", True)
# original_visible_devices = os.environ.get("CUDA_VISIBLE_DEVICES")
# try:
# os.environ["CUDA_VISIBLE_DEVICES"] = "2,3,4"
#
# c = controller(platform=CloudRendering, gpu_device=1)
# assert c.unity_command(650, 550, False) == [
# c._build.executable_path,
# "-screen-fullscreen",
# "0",
# "-screen-quality",
# "7",
# "-screen-width",
# "650",
# "-screen-height",
# "550",
# '-force-device-index',
# '4'
# ]
# finally:
# if original_visible_devices:
# os.environ["CUDA_VISIBLE_DEVICES"] = original_visible_devices
# else:
# del os.environ["CUDA_VISIBLE_DEVICES"]
#
# c = controller(platform=CloudRendering, gpu_device=5)
# assert c.unity_command(650, 550, False) == [
# c._build.executable_path,
# "-screen-fullscreen",
# "0",
# "-screen-quality",
# "7",
# "-screen-width",
# "650",
# "-screen-height",
# "550",
# '-force-device-index',
# '6'
# ]
#
# c = controller(platform=CloudRendering, gpu_device=0)
# assert c.unity_command(650, 550, False) == [
# c._build.executable_path,
# "-screen-fullscreen",
# "0",
# "-screen-quality",
# "7",
# "-screen-width",
# "650",
# "-screen-height",
# "550",
# '-force-device-index',
# '0'
# ]
def test_unity_command(mocker):
mocker.patch("ai2thor.controller.platform_system", fake_linux_system)
mocker.patch("ai2thor.controller.ai2thor.build.Build.exists", fake_exists)
mocker.patch("ai2thor.controller.ai2thor.build.Build.download", noop_download)
mocker.patch("ai2thor.controller.ai2thor.platform.Linux64.validate", fake_validate)
c = controller()
assert c.unity_command(650, 550, False) == [
c._build.executable_path,
"-screen-fullscreen",
"0",
"-screen-quality",
"7",
"-screen-width",
"650",
"-screen-height",
"550",
]
c = controller(fullscreen=True, quality="Low")
assert c.unity_command(650, 550, False) == [
c._build.executable_path,
"-screen-fullscreen",
"1",
"-screen-quality",
"2",
"-screen-width",
"650",
"-screen-height",
"550",
]
| ai2thor-main | ai2thor/tests/test_controller.py |
import datetime
import json
import pdb
import os
import sys
root_dir = os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/..")
sys.path.insert(0, root_dir)
import ai2thor.controller
import math
import ai2thor
import random
import copy
MAX_TESTS = 20
MAX_EP_LEN = 100
scene_names = ["FloorPlan{}_physics".format(i + 1) for i in range(30)]
set_of_actions = ["mm", "rr", "ll", "w", "z", "a", "s", "u", "j", "3", "4", "p"]
controller = ai2thor.controller.Controller(
local_build=True,
scene=scene_names[0],
gridSize=0.25,
width=900,
height=900,
agentMode="arm",
fieldOfView=100,
agentControllerType="mid-level",
server_class=ai2thor.fifo_server.FifoServer,
)
ADITIONAL_ARM_ARGS = {
"disableRendering": True,
"restrictMovement": False,
"waitForFixedUpdate": False,
"eventCollisions": True,
"returnToStart": True,
"speed": 1,
"move_constant": 0.05,
}
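# These kwargs are forwarded to the arm/movement actions by execute_command below;
# "move_constant" is also read locally as the per-step offset (0.05, presumably meters)
# applied to the arm position and to the normalized base height.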
def get_reachable_positions(controller):
event = controller.step("GetReachablePositions")
reachable_positions = event.metadata["reachablePositions"]
return reachable_positions
def execute_command(controller, command, action_dict_addition):
base_position = get_current_arm_state(controller)
change_height = action_dict_addition["move_constant"]
change_value = change_height
action_details = {}
if command == "w":
base_position["z"] += change_value
elif command == "z":
base_position["z"] -= change_value
elif command == "s":
base_position["x"] += change_value
elif command == "a":
base_position["x"] -= change_value
elif command == "3":
base_position["y"] += change_value
elif command == "4":
base_position["y"] -= change_value
elif command == "u":
base_position["h"] += change_height
elif command == "j":
base_position["h"] -= change_height
elif command == "/":
action_details = dict("")
pickupable = controller.last_event.metadata["arm"]["pickupableObjects"]
print(pickupable)
elif command == "d":
controller.step(action="DropMidLevelHand", **action_dict_addition)
action_details = dict(action="DropMidLevelHand", **action_dict_addition)
elif command == "mm":
action_dict_addition = copy.copy(action_dict_addition)
if "moveSpeed" in action_dict_addition:
action_dict_addition["speed"] = action_dict_addition["moveSpeed"]
controller.step(
action="MoveContinuous",
direction=dict(x=0.0, y=0.0, z=0.2),
**action_dict_addition
)
action_details = dict(
action="MoveContinuous",
direction=dict(x=0.0, y=0.0, z=0.2),
**action_dict_addition
)
elif command == "rr":
action_dict_addition = copy.copy(action_dict_addition)
if "moveSpeed" in action_dict_addition:
action_dict_addition["speed"] = action_dict_addition["moveSpeed"]
controller.step(action="RotateContinuous", degrees=45, **action_dict_addition)
action_details = dict(
action="RotateContinuous", degrees=45, **action_dict_addition
)
elif command == "ll":
action_dict_addition = copy.copy(action_dict_addition)
controller.step(action="RotateContinuous", degrees=-45, **action_dict_addition)
action_details = dict(
action="RotateContinuous", degrees=-45, **action_dict_addition
)
elif command == "m":
controller.step(action="MoveAhead", **action_dict_addition)
action_details = dict(action="MoveAhead", **action_dict_addition)
elif command == "r":
controller.step(action="RotateRight", degrees=45, **action_dict_addition)
action_details = dict(action="RotateRight", degrees=45, **action_dict_addition)
elif command == "l":
controller.step(action="RotateLeft", degrees=45, **action_dict_addition)
action_details = dict(action="RotateLeft", degrees=45, **action_dict_addition)
elif command == "p":
controller.step(action="PickUpMidLevelHand")
action_details = dict(action="PickUpMidLevelHand")
elif command == "q":
action_details = {}
else:
action_details = {}
if command in ["w", "z", "s", "a", "3", "4"]:
controller.step(
action="MoveMidLevelArm",
position=dict(
x=base_position["x"], y=base_position["y"], z=base_position["z"]
),
handCameraSpace=False,
**action_dict_addition
)
action_details = dict(
action="MoveMidLevelArm",
position=dict(
x=base_position["x"], y=base_position["y"], z=base_position["z"]
),
handCameraSpace=False,
**action_dict_addition
)
elif command in ["u", "j"]:
if base_position["h"] > 1:
base_position["h"] = 1
elif base_position["h"] < 0:
base_position["h"] = 0
controller.step(
action="MoveArmBase", y=base_position["h"], **action_dict_addition
)
action_details = dict(
action="MoveArmBase", y=base_position["h"], **action_dict_addition
)
return action_details
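# get_current_arm_state returns the last arm joint's (robot_arm_4_jnt) root-relative
# x/y/z plus a normalized base height "h": the first joint's world-space y is mapped
# linearly from [h_min, h_max] (which appear to be the base's travel limits) onto [0, 1].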
def get_current_arm_state(controller):
h_min = 0.450998873
h_max = 1.8009994
event = controller.last_event
joints = event.metadata["arm"]["joints"]
arm = joints[-1]
assert arm["name"] == "robot_arm_4_jnt"
xyz_dict = arm["rootRelativePosition"]
height_arm = joints[0]["position"]["y"]
xyz_dict["h"] = (height_arm - h_min) / (h_max - h_min)
# print_error([x['position']['y'] for x in joints])
return xyz_dict
def reset_the_scene_and_get_reachables(scene_name=None):
if scene_name is None:
scene_name = random.choice(scene_names)
controller.reset(scene_name)
return get_reachable_positions(controller)
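# The comparison helpers below walk nested metadata structures recursively: lists are
# compared element-by-element, dicts key-by-key, floats within an absolute tolerance
# of 1e-3, and NaN is treated as equal to NaN.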
def two_list_equal(l1, l2):
dict1 = {i: v for (i, v) in enumerate(l1)}
dict2 = {i: v for (i, v) in enumerate(l2)}
return two_dict_equal(dict1, dict2)
def two_dict_equal(dict1, dict2):
# removing calls to len to resolve https://lgtm.com/rules/7860092/
dict_equal = len(dict1) == len(dict2)
assert dict_equal, ("different len", dict1, dict2)
equal = True
for k in dict1:
val1 = dict1[k]
val2 = dict2[k]
# https://lgtm.com/rules/7860092/
type_equal = type(val1) == type(val2)
assert type_equal, ("different type", dict1, dict2)
if type(val1) == dict:
equal = two_dict_equal(val1, val2)
elif type(val1) == list:
equal = two_list_equal(val1, val2)
        elif isinstance(val1, float):
            # check the float type before isnan so that string values (e.g. joint
            # names) fall through to plain equality instead of raising a TypeError
            if math.isnan(val1):
                equal = math.isnan(val2)
            else:
                equal = abs(val1 - val2) < 0.001
        else:
            equal = val1 == val2
if not equal:
print("not equal", val1, val2)
return equal
return equal
def get_current_full_state(controller):
return {
"agent_position": controller.last_event.metadata["agent"]["position"],
"agent_rotation": controller.last_event.metadata["agent"]["rotation"],
"arm_state": controller.last_event.metadata["arm"]["joints"],
"held_object": controller.last_event.metadata["arm"]["heldObjects"],
}
def random_tests():
all_timers = []
all_dict = {}
for i in range(MAX_TESTS):
print("test number", i)
reachable_positions = reset_the_scene_and_get_reachables()
initial_location = random.choice(reachable_positions)
initial_rotation = random.choice([i for i in range(0, 360, 45)])
controller.step(
action="TeleportFull",
x=initial_location["x"],
y=initial_location["y"],
z=initial_location["z"],
rotation=dict(x=0, y=initial_rotation, z=0),
horizon=10,
)
initial_pose = dict(
action="TeleportFull",
x=initial_location["x"],
y=initial_location["y"],
z=initial_location["z"],
rotation=dict(x=0, y=initial_rotation, z=0),
horizon=10,
)
controller.step("PausePhysicsAutoSim")
all_commands = []
before = datetime.datetime.now()
for j in range(MAX_EP_LEN):
command = random.choice(set_of_actions)
execute_command(controller, command, ADITIONAL_ARM_ARGS)
all_commands.append(command)
pickupable = controller.last_event.metadata["arm"]["pickupableObjects"]
picked_up_before = controller.last_event.metadata["arm"]["heldObjects"]
if len(pickupable) > 0 and len(picked_up_before) == 0:
cmd = "p"
execute_command(controller, cmd, ADITIONAL_ARM_ARGS)
all_commands.append(cmd)
if controller.last_event.metadata["lastActionSuccess"] is False:
print("Failed to pick up ")
print("scene name", controller.last_event.metadata["sceneName"])
print("initial pose", initial_pose)
print("list of actions", all_commands)
break
after = datetime.datetime.now()
time_diff = after - before
seconds = time_diff.total_seconds()
all_timers.append(len(all_commands) / seconds)
final_state = get_current_full_state(
controller
) # made sure this does not require deep copy
scene_name = controller.last_event.metadata["sceneName"]
# TODO only when pick up has happened
dict_to_add = {
"initial_location": initial_location,
"initial_rotation": initial_rotation,
"all_commands": all_commands,
"final_state": final_state,
"initial_pose": initial_pose,
"scene_name": scene_name,
}
all_dict[len(all_dict)] = dict_to_add
# print('FPS', sum(all_timers) / len(all_timers))
return all_dict
def determinism_test(all_tests):
# Redo the actions 20 times:
# only do this if an object is picked up
for k, test_point in all_tests.items():
initial_location = test_point["initial_location"]
initial_rotation = test_point["initial_rotation"]
all_commands = test_point["all_commands"]
final_state = test_point["final_state"]
initial_pose = test_point["initial_pose"]
scene_name = test_point["scene_name"]
controller.reset(scene_name)
controller.step(
action="TeleportFull",
x=initial_location["x"],
y=initial_location["y"],
z=initial_location["z"],
rotation=dict(x=0, y=initial_rotation, z=0),
horizon=10,
)
controller.step("PausePhysicsAutoSim")
for cmd in all_commands:
execute_command(controller, cmd, ADITIONAL_ARM_ARGS)
current_state = get_current_full_state(controller)
if not two_dict_equal(final_state, current_state):
print("not deterministic")
print("scene name", controller.last_event.metadata["sceneName"])
print("initial pose", initial_pose)
print("list of actions", all_commands)
pdb.set_trace()
else:
print("test {} passed".format(k))
if __name__ == "__main__":
# all_dict = random_tests()
# with open('determinism_json.json' ,'w') as f:
# json.dump(all_dict, f)
with open("arm_test/determinism_json.json", "r") as f:
all_dict = json.load(f)
determinism_test(all_dict)
| ai2thor-main | arm_test/check_determinism_event_collision_different_machines.py |
import datetime
import json
import pdb
import math
import os
import sys
root_dir = os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/..")
sys.path.insert(0, root_dir)
import ai2thor.controller
import ai2thor
import random
import copy
MAX_TESTS = 20
MAX_EP_LEN = 100
scene_names = ["FloorPlan{}_physics".format(i + 1) for i in range(30)]
set_of_actions = ["mm", "rr", "ll", "w", "z", "a", "s", "u", "j", "3", "4", "p"]
controller = ai2thor.controller.Controller(
local_build=True,
scene=scene_names[0],
gridSize=0.25,
width=900,
height=900,
agentMode="arm",
fieldOfView=100,
agentControllerType="mid-level",
server_class=ai2thor.fifo_server.FifoServer,
)
ADITIONAL_ARM_ARGS = {
"disableRendering": True,
"restrictMovement": False,
"waitForFixedUpdate": False,
"returnToStart": True,
"speed": 1,
"move_constant": 0.05,
}
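# As in the sibling determinism script, these kwargs are forwarded to the arm/movement
# actions, with "move_constant" doubling as the per-step offset used by
# execute_command below.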
def get_reachable_positions(controller):
event = controller.step("GetReachablePositions")
reachable_positions = event.metadata["reachablePositions"]
return reachable_positions
def execute_command(controller, command, action_dict_addition):
base_position = get_current_arm_state(controller)
change_height = action_dict_addition["move_constant"]
change_value = change_height
action_details = {}
if command == "w":
base_position["z"] += change_value
elif command == "z":
base_position["z"] -= change_value
elif command == "s":
base_position["x"] += change_value
elif command == "a":
base_position["x"] -= change_value
elif command == "3":
base_position["y"] += change_value
elif command == "4":
base_position["y"] -= change_value
elif command == "u":
base_position["h"] += change_height
elif command == "j":
base_position["h"] -= change_height
elif command == "/":
action_details = dict("")
pickupable = controller.last_event.metadata["arm"]["pickupableObjects"]
print(pickupable)
elif command == "d":
controller.step(action="DropMidLevelHand", **action_dict_addition)
action_details = dict(action="DropMidLevelHand", **action_dict_addition)
elif command == "mm":
action_dict_addition = copy.copy(action_dict_addition)
if "moveSpeed" in action_dict_addition:
action_dict_addition["speed"] = action_dict_addition["moveSpeed"]
controller.step(
action="MoveContinuous",
direction=dict(x=0.0, y=0.0, z=0.2),
**action_dict_addition
)
action_details = dict(
action="MoveContinuous",
direction=dict(x=0.0, y=0.0, z=0.2),
**action_dict_addition
)
elif command == "rr":
action_dict_addition = copy.copy(action_dict_addition)
if "moveSpeed" in action_dict_addition:
action_dict_addition["speed"] = action_dict_addition["moveSpeed"]
controller.step(action="RotateContinuous", degrees=45, **action_dict_addition)
action_details = dict(
action="RotateContinuous", degrees=45, **action_dict_addition
)
elif command == "ll":
action_dict_addition = copy.copy(action_dict_addition)
controller.step(action="RotateContinuous", degrees=-45, **action_dict_addition)
action_details = dict(
action="RotateContinuous", degrees=-45, **action_dict_addition
)
elif command == "m":
controller.step(action="MoveAhead", **action_dict_addition)
action_details = dict(action="MoveAhead", **action_dict_addition)
elif command == "r":
controller.step(action="RotateRight", degrees=45, **action_dict_addition)
action_details = dict(action="RotateRight", degrees=45, **action_dict_addition)
elif command == "l":
controller.step(action="RotateLeft", degrees=45, **action_dict_addition)
action_details = dict(action="RotateLeft", degrees=45, **action_dict_addition)
elif command == "p":
controller.step(action="PickUpMidLevelHand")
action_details = dict(action="PickUpMidLevelHand")
elif command == "q":
action_details = {}
else:
action_details = {}
if command in ["w", "z", "s", "a", "3", "4"]:
controller.step(
action="MoveMidLevelArm",
position=dict(
x=base_position["x"], y=base_position["y"], z=base_position["z"]
),
handCameraSpace=False,
**action_dict_addition
)
action_details = dict(
action="MoveMidLevelArm",
position=dict(
x=base_position["x"], y=base_position["y"], z=base_position["z"]
),
handCameraSpace=False,
**action_dict_addition
)
elif command in ["u", "j"]:
if base_position["h"] > 1:
base_position["h"] = 1
elif base_position["h"] < 0:
base_position["h"] = 0
controller.step(
action="MoveArmBase", y=base_position["h"], **action_dict_addition
)
action_details = dict(
action="MoveArmBase", y=base_position["h"], **action_dict_addition
)
return action_details
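# Return the wrist position relative to the arm root, plus the arm base
# height normalized to [0, 1] between the observed minimum and maximum heights.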
def get_current_arm_state(controller):
h_min = 0.450998873
h_max = 1.8009994
event = controller.last_event
joints = event.metadata["arm"]["joints"]
arm = joints[-1]
assert arm["name"] == "robot_arm_4_jnt"
xyz_dict = arm["rootRelativePosition"]
height_arm = joints[0]["position"]["y"]
xyz_dict["h"] = (height_arm - h_min) / (h_max - h_min)
# print_error([x['position']['y'] for x in joints])
return xyz_dict
def reset_the_scene_and_get_reachables(scene_name=None):
if scene_name is None:
scene_name = random.choice(scene_names)
controller.reset(scene_name)
return get_reachable_positions(controller)
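# Recursive equality helpers: nested dicts and lists are compared element-wise,
# floats are compared with a small tolerance, and NaNs are treated as equal.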
def two_list_equal(l1, l2):
dict1 = {i: v for (i, v) in enumerate(l1)}
dict2 = {i: v for (i, v) in enumerate(l2)}
return two_dict_equal(dict1, dict2)
def two_dict_equal(dict1, dict2):
# https://lgtm.com/rules/7860092/
dict_equal = len(dict1) == len(dict2)
assert dict_equal, ("different len", dict1, dict2)
equal = True
for k in dict1:
val1 = dict1[k]
val2 = dict2[k]
# https://lgtm.com/rules/7860092/
type_equal = type(val1) == type(val2)
assert type_equal, ("different type", dict1, dict2)
if type(val1) == dict:
equal = two_dict_equal(val1, val2)
elif type(val1) == list:
equal = two_list_equal(val1, val2)
        elif type(val1) == float:
            # Check the type before calling math.isnan so non-numeric values
            # (e.g. object-name strings) do not raise a TypeError.
            if math.isnan(val1):
                equal = math.isnan(val2)
            else:
                equal = abs(val1 - val2) < 0.001
else:
equal = val1 == val2
if not equal:
print("not equal", val1, val2)
return equal
return equal
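# Snapshot of agent pose, arm joints, and held objects used to compare runs.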
def get_current_full_state(controller):
return {
"agent_position": controller.last_event.metadata["agent"]["position"],
"agent_rotation": controller.last_event.metadata["agent"]["rotation"],
"arm_state": controller.last_event.metadata["arm"]["joints"],
"held_object": controller.last_event.metadata["arm"]["heldObjects"],
}
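# Run MAX_TESTS random episodes (random scene, start pose, and action sequence),
# recording the commands and the resulting final state so they can be replayed.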
def random_tests():
all_timers = []
all_dict = {}
for i in range(MAX_TESTS):
print("test number", i)
reachable_positions = reset_the_scene_and_get_reachables()
initial_location = random.choice(reachable_positions)
initial_rotation = random.choice([i for i in range(0, 360, 45)])
controller.step(
action="TeleportFull",
x=initial_location["x"],
y=initial_location["y"],
z=initial_location["z"],
rotation=dict(x=0, y=initial_rotation, z=0),
horizon=10,
)
initial_pose = dict(
action="TeleportFull",
x=initial_location["x"],
y=initial_location["y"],
z=initial_location["z"],
rotation=dict(x=0, y=initial_rotation, z=0),
horizon=10,
)
controller.step("PausePhysicsAutoSim")
all_commands = []
before = datetime.datetime.now()
for j in range(MAX_EP_LEN):
command = random.choice(set_of_actions)
execute_command(controller, command, ADITIONAL_ARM_ARGS)
all_commands.append(command)
pickupable = controller.last_event.metadata["arm"]["pickupableObjects"]
picked_up_before = controller.last_event.metadata["arm"]["heldObjects"]
if len(pickupable) > 0 and len(picked_up_before) == 0:
cmd = "p"
execute_command(controller, cmd, ADITIONAL_ARM_ARGS)
all_commands.append(cmd)
if controller.last_event.metadata["lastActionSuccess"] is False:
print("Failed to pick up ")
print("scene name", controller.last_event.metadata["sceneName"])
print("initial pose", initial_pose)
print("list of actions", all_commands)
break
after = datetime.datetime.now()
time_diff = after - before
seconds = time_diff.total_seconds()
all_timers.append(len(all_commands) / seconds)
final_state = get_current_full_state(
controller
) # made sure this does not require deep copy
scene_name = controller.last_event.metadata["sceneName"]
# TODO only when pick up has happened
dict_to_add = {
"initial_location": initial_location,
"initial_rotation": initial_rotation,
"all_commands": all_commands,
"final_state": final_state,
"initial_pose": initial_pose,
"scene_name": scene_name,
}
all_dict[len(all_dict)] = dict_to_add
# print('FPS', sum(all_timers) / len(all_timers))
return all_dict
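# Replay each recorded episode and verify the simulator reaches the same final
# state; drop into pdb when a mismatch is found.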
def determinism_test(all_tests):
# Redo the actions 20 times:
# only do this if an object is picked up
for k, test_point in all_tests.items():
initial_location = test_point["initial_location"]
initial_rotation = test_point["initial_rotation"]
all_commands = test_point["all_commands"]
final_state = test_point["final_state"]
initial_pose = test_point["initial_pose"]
scene_name = test_point["scene_name"]
controller.reset(scene_name)
controller.step(
action="TeleportFull",
x=initial_location["x"],
y=initial_location["y"],
z=initial_location["z"],
rotation=dict(x=0, y=initial_rotation, z=0),
horizon=10,
)
controller.step("PausePhysicsAutoSim")
for cmd in all_commands:
execute_command(controller, cmd, ADITIONAL_ARM_ARGS)
current_state = get_current_full_state(controller)
if not two_dict_equal(final_state, current_state):
print("not deterministic")
print("scene name", controller.last_event.metadata["sceneName"])
print("initial pose", initial_pose)
print("list of actions", all_commands)
pdb.set_trace()
else:
print("test {} passed".format(k))
if __name__ == "__main__":
# all_dict = random_tests()
# with open('determinism_json.json' ,'w') as f:
# json.dump(all_dict, f)
with open("arm_test/determinism_json.json", "r") as f:
all_dict = json.load(f)
determinism_test(all_dict)
| ai2thor-main | arm_test/check_determinism_different_machines.py |
import os
import sys
root_dir = os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/..")
sys.path.insert(0, root_dir)
import ai2thor.controller
c = ai2thor.controller.Controller(
scene="FloorPlan1_physics",
gridSize=0.25,
width=900,
height=900,
agentMode="arm",
fieldOfView=100,
agentControllerType="mid-level",
targetFrameRate=30,
fixedDeltaTime=0.005,
)
print(c.build_url())
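# Set an initial agent pose and arm configuration before the replay below.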
c.step(
action="TeleportFull",
x=-1,
y=0.9009995460510254,
z=1,
rotation=dict(x=0, y=180, z=0),
horizon=0,
)
c.step(
action="MoveMidLevelArm",
disableRendering=False,
position=dict(x=0.01, y=0, z=0.01),
speed=2,
returnToStart=False,
handCameraSpace=False,
)
c.step(
action="MoveArmBase", disableRendering=False, y=0.9, speed=2, returnToStart=False
)
pose = {"x": -1.0, "y": 0.9009995460510254, "z": 1, "rotation": 135, "horizon": 0}
c.step(
action="TeleportFull",
x=pose["x"],
y=pose["y"],
z=pose["z"],
rotation=dict(x=0.0, y=pose["rotation"], z=0.0),
horizon=pose["horizon"],
)
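# Recorded sequence of arm/base actions to replay; the empty entries are
# placeholders that are skipped during playback at the end of the script.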
actions = [
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.059573404, "y": 0.0, "z": 0.0281161666},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.8314809351552201,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.8018513467217335,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.7722217582882469,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.7425921698547602,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.7129625740138691,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.6833329855803827,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.6537033971468961,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295712, "y": 5.96046448e-08, "z": 0.0781169713},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295711979, "y": 0.0, "z": 0.098116833},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.029570736, "y": 1.1920929e-07, "z": 0.11811674},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.02957208, "y": 0.0, "z": 0.13811702},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295723379, "y": -1.1920929e-07, "z": 0.15811688000000002},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.02957233, "y": 1.1920929e-07, "z": 0.17811683099999998},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295722485, "y": 0.0, "z": 0.198116782},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.02957207, "y": -2.38418579e-07, "z": 0.2181169},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.029571943, "y": -1.1920929e-07, "z": 0.23811695300000002},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295718536, "y": -1.1920929e-07, "z": 0.258116919},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295715183, "y": -1.1920929e-07, "z": 0.278117019},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295714289, "y": -1.1920929e-07, "z": 0.298117208},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295709223, "y": -1.1920929e-07, "z": 0.31811716},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295703411, "y": 0.0, "z": 0.338116872},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295695812, "y": -2.38418579e-07, "z": 0.358116376},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0295692533, "y": 0.0, "z": 0.378115761},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.6240738087134093,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5944442202799227,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.020431030000000003, "y": -5.96046448e-08, "z": 0.3481152},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.00956957, "y": -2.38418579e-07, "z": 0.398114669},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569496, "y": -1.1920929e-07, "z": 0.41811468},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569377, "y": -1.1920929e-07, "z": 0.43811484},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{"action": ""},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569332, "y": -1.1920929e-07, "z": 0.4581149},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569213, "y": -2.38418579e-07, "z": 0.478115},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5648146688834588,
"speed": 2,
"returnToStart": False,
},
{"action": ""},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5351850508203542,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5055554327572495,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.47592585913857194,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.47592585913857194,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569228, "y": -2.38418579e-07, "z": 0.498115051},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569168, "y": -2.38418579e-07, "z": 0.51811533},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.009569213, "y": -1.1920929e-07, "z": 0.5381154300000001},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{"action": ""},
{"action": "PickUpMidLevelHand"},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.575925859138572,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.059569168000000006, "y": -2.98023224e-07, "z": 0.508115649},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.07956888000000001, "y": -3.57627869e-07, "z": 0.5081153},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495688021, "y": -2.38418579e-07, "z": 0.558114934},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495688021, "y": -3.57627869e-07, "z": 0.5781150340000001},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495687425, "y": -2.38418579e-07, "z": 0.5981153},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495686531, "y": -2.38418579e-07, "z": 0.6181155},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495686829, "y": -2.38418579e-07, "z": 0.6381157000000001},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495687127, "y": -3.57627869e-07, "z": 0.6181159},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495687127, "y": -4.76837158e-07, "z": 0.638116169},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495688319, "y": 0.0, "z": 0.6181162},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495688021, "y": 0.0, "z": 0.6381164},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.0495688021, "y": -2.38418579e-07, "z": 0.6181165000000001},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5055554327572495,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5203702417888018,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5351850508203542,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveArmBase",
"disableRendering": False,
"y": 0.5499998598519065,
"speed": 2,
"returnToStart": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.00043116810000000394, "y": -1.1920929e-07, "z": 0.5881169},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.020431060000000004, "y": -1.1920929e-07, "z": 0.5881164},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.0004312277000000017, "y": -2.38418579e-07, "z": 0.5881171},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.020431149000000003, "y": 0.0, "z": 0.588116944},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.0004310200000000042, "y": -2.38418579e-07, "z": 0.5881175},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.07956903, "y": -1.1920929e-07, "z": 0.588117361},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.059569168000000006, "y": -3.57627869e-07, "z": 0.5881177},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.039569217000000004, "y": 2.38418579e-07, "z": 0.588118434},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": 0.019569176400000002, "y": -2.38418579e-07, "z": 0.588119},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.0004308938999999998, "y": -2.38418579e-07, "z": 0.5881196},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.020430815199999994, "y": -1.1920929e-07, "z": 0.5881202},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {
"x": -0.040430869999999994,
"y": -2.38418579e-07,
"z": 0.588120937,
},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.060430716999999995, "y": -1.1920929e-07, "z": 0.5881218},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.08043068299999999, "y": 0.0, "z": 0.588122368},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.10043055999999999, "y": 0.0, "z": 0.5881231},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.12043055600000001, "y": 0.0, "z": 0.5881235},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.140430626, "y": -2.38418579e-07, "z": 0.588124156},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{
"action": "MoveMidLevelArm",
"disableRendering": False,
"position": {"x": -0.16043072600000002, "y": 0.0, "z": 0.588124752},
"speed": 2,
"returnToStart": False,
"handCameraSpace": False,
},
{},
]
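# Replay the recorded trajectory, skipping the empty placeholder entries.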
counter = 0
for a in actions:
if a == {} or a == {"action": ""}:
continue
c.step(a)
| ai2thor-main | arm_test/arm_counter_30fps_fixed_update.py |