python_code | repo_name | file_path
---|---|---|
"""Unittests for Datasets."""
from itertools import product
import numpy as np
import pytest
from meerkat import concat
from meerkat.columns.object.base import ObjectColumn
from meerkat.columns.tensor.numpy import NumPyTensorColumn
from meerkat.dataframe import DataFrame
from meerkat.errors import ConcatError, ConcatWarning
from meerkat.ops.map import defer
from ...testbeds import MockDatapanel
from ...utils import product_parametrize
from ..columns.abstract import AbstractColumnTestBed, column_parametrize
from ..columns.deferred.test_deferred import DeferredColumnTestBed
from ..columns.deferred.test_image import ImageColumnTestBed
from ..columns.scalar.test_arrow import ArrowScalarColumnTestBed
from ..columns.scalar.test_pandas import PandasScalarColumnTestBed
from ..columns.tensor.test_numpy import NumPyTensorColumnTestBed
from ..columns.tensor.test_torch import TorchTensorColumnTestBed
# flake8: noqa: E501
@pytest.fixture(
**column_parametrize(
[
NumPyTensorColumnTestBed,
PandasScalarColumnTestBed,
TorchTensorColumnTestBed,
DeferredColumnTestBed,
ArrowScalarColumnTestBed,
ImageColumnTestBed,
]
)
)
def column_testbed(request, tmpdir):
testbed_class, config = request.param
return testbed_class(**config, tmpdir=tmpdir)
@pytest.mark.parametrize(
"use_visible_columns,n",
product([True, False], [1, 2, 3]),
)
def test_dataframe_row_concat(use_visible_columns, n):
mock_df = MockDatapanel(
length=16,
use_visible_columns=use_visible_columns,
)
out = concat([mock_df.df] * n, axis="rows")
assert len(out) == len(mock_df.visible_rows) * n
assert isinstance(out, DataFrame)
assert set(out.columns) == set(mock_df.df.columns)
assert (out["a"].data == np.concatenate([mock_df.visible_rows] * n)).all()
assert out["b"].data == list(np.concatenate([mock_df.visible_rows] * n))
def test_dataframe_column_concat():
mock_df = MockDatapanel(
length=16,
use_visible_columns=False,
)
out = concat([mock_df.df[["a"]], mock_df.df[["b"]]], axis="columns")
assert len(out) == len(mock_df.visible_rows)
assert isinstance(out, DataFrame)
assert set(out.columns) == {"a", "b"}
assert list(out["a"].data) == out["b"].data
@product_parametrize(params={"n": [1, 2, 3]})
def test_concat(column_testbed: AbstractColumnTestBed, n: int):
col = column_testbed.col
out = concat([col] * n)
assert len(out) == len(col) * n
assert isinstance(out, type(col))
for i in range(n):
assert out[i * len(col) : (i + 1) * len(col)].is_equal(col)
def test_concat_same_columns():
a = DataFrame.from_batch({"a": [1, 2, 3]})
b = DataFrame.from_batch({"a": [2, 3, 4]})
out = concat([a, b], axis="columns", suffixes=["_a", "_b"])
assert out.columns == ["a_a", "a_b"]
assert list(out["a_a"].data) == [1, 2, 3]
assert list(out["a_b"].data) == [2, 3, 4]
def test_concat_different_type():
a = NumPyTensorColumn.from_array([1, 2, 3])
b = ObjectColumn.from_list([1, 2, 3])
with pytest.raises(ConcatError):
concat([a, b])
def test_concat_unsupported_type():
a = [1, 2, 3]
b = [4, 5, 6]
with pytest.raises(ConcatError):
concat([a, b])
def test_concat_unsupported_axis():
a = DataFrame.from_batch({"a": [1, 2, 3]})
b = DataFrame.from_batch({"b": [1, 2, 3]})
with pytest.raises(ConcatError):
concat([a, b], axis="abc")
def test_concat_different_column_names():
a = DataFrame.from_batch({"a": [1, 2, 3]})
b = DataFrame.from_batch({"b": [1, 2, 3]})
with pytest.raises(ConcatError):
concat([a, b], axis="rows")
def test_concat_different_lengths():
a = DataFrame.from_batch({"a": [1, 2, 3]})
b = DataFrame.from_batch({"b": [1, 2, 3, 4]})
with pytest.raises(ConcatError):
concat([a, b], axis="columns")
def test_empty_concat():
out = concat([])
assert isinstance(out, DataFrame)
def test_concat_deferred_column_different_fns():
"""Test concat with deferred columns that have different functions.
The fn of the first dataframe will be taken.
"""
a = DataFrame.from_batch({"a": [1, 2, 3]})
b = DataFrame.from_batch({"a": [4, 5, 6]})
a["fn"] = defer(a["a"], lambda x: x + 1)
b["fn"] = defer(b["a"], lambda x: x + 2)
with pytest.warns(ConcatWarning):
out = concat([a, b], axis="rows")
np.testing.assert_equal(np.asarray(out["fn"]().data), [2, 3, 4, 5, 6, 7])
# def test_concat_deferred_column_expand():
# """
# Deferred columns should expand to dataframes when concatenated.
# """
# a = DataFrame.from_batch({"a": [1, 2, 3]})
# b = DataFrame.from_batch({"a": [4, 5, 6]})
# a["fn"] = defer(a["a"], lambda x: x + 1)
# out = concat([a, b], axis="rows")
# np.testing.assert_equal(out["fn"]().data, [2, 3, 4, 5, 6, 7])
| meerkat-main | tests/meerkat/ops/test_concat.py |
import pytest
from meerkat import DeferredColumn
from meerkat.dataframe import DataFrame
from ...utils import product_parametrize
from ..columns.abstract import AbstractColumnTestBed, column_parametrize
# from ..columns.deferred.test_deferred import DeferredColumnTestBed
# from ..columns.deferred.test_image import ImageColumnTestBed
# from ..columns.scalar.test_arrow import ArrowScalarColumnTestBed
# from ..columns.scalar.test_pandas import PandasScalarColumnTestBed
from ..columns.tensor.test_numpy import NumPyTensorColumnTestBed
# from ..columns.tensor.test_torch import TorchTensorColumnTestBed
@pytest.fixture(
**column_parametrize(
[
NumPyTensorColumnTestBed,
# PandasScalarColumnTestBed,
# TorchTensorColumnTestBed,
# DeferredColumnTestBed,
# ArrowScalarColumnTestBed,
# ImageColumnTestBed,
]
)
)
def column_testbed(request, tmpdir):
testbed_class, config = request.param
return testbed_class(**config, tmpdir=tmpdir)
@product_parametrize(
params={
"batched": [True, False],
"materialize": [True, False],
"use_ray": [False], # TODO: Put the tests with ray back
}
)
def test_map_return_single(
column_testbed: AbstractColumnTestBed,
batched: bool,
materialize: bool,
use_ray: bool,
):
"""`map`, single return,"""
if not (isinstance(column_testbed.col, DeferredColumn) or materialize):
# skip columns for which materialize has no effect
return
col = column_testbed.col
map_spec = column_testbed.get_map_spec(batched=batched, materialize=materialize)
def func(x):
out = map_spec["fn"](x)
return out
result = col.map(
func,
batch_size=4,
is_batched_fn=batched,
materialize=materialize,
output_type=map_spec.get("output_type", None),
use_ray=use_ray,
)
assert result.is_equal(map_spec["expected_result"])
# @product_parametrize(params={"batched": [True, False], "materialize": [True, False]})
# def test_map_return_single_w_kwarg(
# column_testbed: AbstractColumnTestBed, batched: bool, materialize: bool
# ):
# """`map`, single return,"""
# if not (isinstance(column_testbed.col, DeferredColumn) or materialize):
# # skip columns for which materialize has no effect
# return
# col = column_testbed.col
# kwarg = 2
# map_spec = column_testbed.get_map_spec(
# batched=batched, materialize=materialize, kwarg=kwarg
# )
# def func(x, k=0):
# out = map_spec["fn"](x, k=k)
# return out
# result = col.map(
# func,
# batch_size=4,
# is_batched_fn=batched,
# materialize=materialize,
# output_type=map_spec.get("output_type", None),
# k=kwarg,
# )
# assert result.is_equal(map_spec["expected_result"])
@product_parametrize(
params={
"batched": [True, False],
"materialize": [True, False],
"use_ray": [False], # TODO (dean): Multiple outputs not supported.
}
)
def test_map_return_multiple(
column_testbed: AbstractColumnTestBed,
batched: bool,
materialize: bool,
use_ray: bool,
):
"""`map`, single return,"""
if not (isinstance(column_testbed.col, DeferredColumn) or materialize):
# skip columns for which materialize has no effect
return
col = column_testbed.col
map_specs = {
"map1": column_testbed.get_map_spec(
batched=batched, materialize=materialize, salt=1
),
"map2": column_testbed.get_map_spec(
batched=batched, materialize=materialize, salt=2
),
}
def func(x):
out = {key: map_spec["fn"](x) for key, map_spec in map_specs.items()}
return out
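    # Returning a dict from the mapped fn makes `map` produce a DataFrame with
    # one column per key; each column is checked against its spec below.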
result = col.map(
func,
batch_size=4,
is_batched_fn=batched,
materialize=materialize,
output_type={k: v.get("output_type", None) for k, v in map_specs.items()},
use_ray=use_ray,
)
assert isinstance(result, DataFrame)
for key, map_spec in map_specs.items():
assert result[key].is_equal(map_spec["expected_result"])
| meerkat-main | tests/meerkat/ops/test_map.py |
import hashlib
import os
import numpy as np
import PIL
import pytest
import torch
from PIL import Image
import meerkat as mk
from meerkat import embed
from meerkat.ops.embed import encoders
from meerkat.ops.embed.encoder import Encoder
class ImageColumnTestBed:
def __init__(
self,
tmpdir: str,
length: int = 16,
):
self.image_paths = []
self.image_arrays = []
self.ims = []
self.data = []
for i in range(0, length):
self.image_arrays.append((i * np.ones((4, 4, 3))).astype(np.uint8))
im = Image.fromarray(self.image_arrays[-1])
self.ims.append(im)
self.data.append(im)
filename = "{}.png".format(i)
im.save(os.path.join(tmpdir, filename))
self.image_paths.append(os.path.join(tmpdir, filename))
self.col = mk.FileColumn(
self.image_paths,
loader=Image.open,
)
class TextColumnTestBed:
def __init__(self, length: int = 16):
self.data = ["Row " * idx for idx in range(length)]
self.col = mk.ScalarColumn(self.data)
EMB_SIZE = 4
def simple_encode(batch: torch.Tensor):
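    # Collapse each row to its scalar mean, then broadcast that value across
    # EMB_SIZE dims so every "embedding" is constant and easy to verify.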
value = batch.to(torch.float32).mean(dim=-1, keepdim=True)
return torch.ones(batch.shape[0], EMB_SIZE) * value
def simple_image_transform(image: PIL.Image):
return torch.tensor(np.asarray(image)).to(torch.float32)
def simple_text_transform(text: str):
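    # Hash each whitespace token to a deterministic id in [0, 100) via sha256,
    # then keep only the first token so every row yields a length-1 tensor.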
return torch.tensor(
[
int.from_bytes(hashlib.sha256(token.encode("utf-8")).digest(), "big") % 100
for token in text.split(" ")
]
)[:1]
def _simple_encoder(variant: str = "ViT-B/32", device: str = "cpu"):
return {
"image": Encoder(encode=simple_encode, preprocess=simple_image_transform),
"text": Encoder(encode=simple_encode, preprocess=simple_text_transform),
}
@pytest.fixture()
def simple_encoder(monkeypatch):
if "_simple_encoder" not in encoders.names:
encoders.register(_simple_encoder)
return _simple_encoder
def test_embed_images(tmpdir: str, simple_encoder):
image_testbed = ImageColumnTestBed(tmpdir=tmpdir)
df = mk.DataFrame({"image": image_testbed.col})
df = embed(
data=df,
input="image",
encoder="_simple_encoder",
modality="image",
batch_size=4,
num_workers=0,
)
assert isinstance(df, mk.DataFrame)
assert "_simple_encoder(image)" in df
assert (
simple_image_transform(df["image"][0]()).mean()
== df["_simple_encoder(image)"][0].mean()
)
def test_embed_text(simple_encoder):
testbed = TextColumnTestBed()
df = mk.DataFrame({"text": testbed.col})
df = embed(
data=df,
input="text",
encoder="_simple_encoder",
modality="text",
batch_size=4,
num_workers=0,
)
assert isinstance(df, mk.DataFrame)
assert "_simple_encoder(text)" in df
assert (
simple_text_transform(df["text"][0]).to(torch.float32).mean()
== df["_simple_encoder(text)"][0].mean()
)
| meerkat-main | tests/meerkat/ops/embed/test__init__.py |
| meerkat-main | tests/meerkat/ops/embed/__init__.py |
| meerkat-main | tests/meerkat/ops/sliceby/__init__.py |
import numpy as np
import pytest
from meerkat import NumPyTensorColumn, ObjectColumn
from meerkat.dataframe import DataFrame
from meerkat.ops.sliceby.groupby import GroupBy, groupby
# These tests cover grouping by single and multiple columns, with the `by`
# argument passed as a string or as a list, across column types including
# ListColumn, NumpyArrayColumn, and TensorColumn, and with NumPy arrays and
# TensorColumns as key indices.
def assertNumpyArrayEquality(arr1, arr2):
assert np.allclose(arr1, arr2)
def test_group_by_type():
df = DataFrame(
{
"a": NumPyTensorColumn([1, 2, 2, 1, 3, 2, 3]),
"name": NumPyTensorColumn(
np.array(
["sam", "liam", "sam", "owen", "liam", "connor", "connor"],
dtype=str,
)
),
"b": NumPyTensorColumn([1, 2, 3, 4, 5, 6, 7]),
"c": NumPyTensorColumn([1.0, 3.2, 2.1, 4.3, 5.4, 6.5, 7.6]),
}
)
df = groupby(df, "name")
assert isinstance(df, GroupBy)
def test_tensor_column_by():
df = DataFrame(
{
"a": NumPyTensorColumn([1, 2, 2, 1, 3, 2, 3]),
"name": NumPyTensorColumn(
np.array(["a", "b", "a", "c", "b", "d", "d"], dtype=str)
),
"b": NumPyTensorColumn([1, 2, 3, 4, 5, 6, 7]),
"c": NumPyTensorColumn([1.0, 3.2, 2.1, 4.3, 5.4, 6.5, 7.6]),
}
)
df = groupby(df, "a")
out = df["b"].mean()
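    # Worked expectation: rows with a == 1 carry b values [1, 4] (mean 2.5),
    # a == 2 carries [2, 3, 6] (mean 11/3), and a == 3 carries [5, 7] (mean 6).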
assertNumpyArrayEquality(out["b"].data, np.array([2.5, (2 + 3 + 6) / 3, 6]))
assertNumpyArrayEquality(out["a"].data, np.array([1, 2, 3]))
def test_group_by_integer_type():
df = DataFrame(
{
"a": NumPyTensorColumn([1, 2, 2, 1, 3, 2, 3]),
"name": NumPyTensorColumn(
np.array(["a", "b", "a", "c", "b", "d", "d"], dtype=str)
),
"b": NumPyTensorColumn([1, 2, 3, 4, 5, 6, 7]),
"c": NumPyTensorColumn([1.0, 3.2, 2.1, 4.3, 5.4, 6.5, 7.6]),
}
)
df = groupby(df, "a")
out = df["b"].mean()
assertNumpyArrayEquality(out["b"].data, np.array([2.5, (2 + 3 + 6) / 3, 6]))
assertNumpyArrayEquality(out["a"].data, np.array([1, 2, 3]))
def test_group_by_integer_type_md():
b = np.zeros((7, 4))
b[0, 0] = 4
b[1, 1] = 3
df = DataFrame(
{
"a": NumPyTensorColumn([1, 2, 2, 1, 3, 2, 3]),
"name": NumPyTensorColumn(
np.array(["a", "b", "a", "c", "b", "d", "d"], dtype=str)
),
"b": NumPyTensorColumn(b),
"c": NumPyTensorColumn([1.0, 3.2, 2.1, 4.3, 5.4, 6.5, 7.6]),
}
)
df = df.groupby("a")
out = df["b"].mean(axis=0)
assertNumpyArrayEquality(out["b"][0], np.array([2, 0, 0, 0]))
assert out["a"][0] == 1
assert out["a"][1] == 2
assert out["a"][2] == 3
assertNumpyArrayEquality(out["b"][1], np.array([0, 1, 0, 0]))
assertNumpyArrayEquality(out["b"][2], np.array([0, 0, 0, 0]))
def test_group_by_integer_type_axis_passed():
df = DataFrame(
{
"a": NumPyTensorColumn([1, 2, 2, 1, 3, 2, 3]),
"name": NumPyTensorColumn(
np.array(["a", "b", "a", "c", "b", "d", "d"], dtype=str)
),
"b": NumPyTensorColumn([1, 2, 3, 4, 5, 6, 7]),
"c": NumPyTensorColumn([1.0, 3.2, 2.1, 4.3, 5.4, 6.5, 7.6]),
}
)
df = groupby(df, "a")
out = df["b"].mean(axis=0)
assertNumpyArrayEquality(out["b"].data, np.array([2.5, (2 + 3 + 6) / 3, 6]))
assertNumpyArrayEquality(out["a"].data, np.array([1, 2, 3]))
def test_group_by_integer_type_as_prop():
df = DataFrame(
{
"a": NumPyTensorColumn([1, 2, 2, 1, 3, 2, 3]),
"name": NumPyTensorColumn(
np.array(["a", "b", "a", "c", "b", "d", "d"], dtype=str)
),
"b": NumPyTensorColumn([1, 2, 3, 4, 5, 6, 7]),
"c": NumPyTensorColumn([1.0, 3.2, 2.1, 4.3, 5.4, 6.5, 7.6]),
}
)
df = df.groupby("a")
out = df["b"].mean()
assertNumpyArrayEquality(out["b"].data, np.array([2.5, (2 + 3 + 6) / 3, 6]))
assertNumpyArrayEquality(out["a"].data, np.array([1, 2, 3]))
def test_group_by_tensor_key():
df = DataFrame(
{
"a": NumPyTensorColumn([1, 2, 2, 1, 3, 2, 3]),
"name": NumPyTensorColumn(
np.array(["a", "b", "a", "c", "b", "d", "d"], dtype=str)
),
"b": NumPyTensorColumn([1, 2, 3, 4, 5, 6, 7]),
"c": NumPyTensorColumn([1.0, 3.2, 2.1, 4.3, 5.4, 6.5, 7.6]),
}
)
df = groupby(df, "a")
out = df["b"].mean()
assertNumpyArrayEquality(out["b"].data, np.array([2.5, (2 + 3 + 6) / 3, 6]))
assertNumpyArrayEquality(out["a"].data, np.array([1, 2, 3]))
def test_group_by_string_type():
df = DataFrame(
{
"a": NumPyTensorColumn([1, 2, 2, 1, 3, 2, 3]),
"name": NumPyTensorColumn(
np.array(["a", "b", "a", "c", "b", "d", "d"], dtype=str)
),
"b": NumPyTensorColumn([1, 2, 3, 4, 5, 6, 7]),
"c": NumPyTensorColumn([1.0, 3.2, 2.1, 4.3, 5.4, 6.5, 7.6]),
}
)
df = groupby(df, "name")
out = df["b"].mean()
assertNumpyArrayEquality(out["b"].data, np.array([2, 3.5, 4, 6.5]))
assert (out["name"].data == NumPyTensorColumn(["a", "b", "c", "d"]).data).all()
def test_group_by_string_type_multiple_keys():
df = DataFrame(
{
"a": NumPyTensorColumn([1, 2, 2, 1, 3, 2, 3]),
"name": NumPyTensorColumn(
np.array(["a", "b", "a", "c", "b", "d", "d"], dtype=str)
),
"b": NumPyTensorColumn([1, 2, 3, 4, 5, 6, 7]),
"c": NumPyTensorColumn([1.0, 3.2, 2.1, 4.3, 5.4, 6.5, 7.6]),
}
)
df = groupby(df, "name")
out = df[["b", "a"]].mean()
assert (np.linalg.norm(out["a"].data - np.array([1.5, 2.5, 1, 2.5]))) < 1e-10
assertNumpyArrayEquality(out["b"].data, np.array([2, 3.5, 4, 6.5]))
assert (out["name"].data == NumPyTensorColumn(["a", "b", "c", "d"]).data).all()
def test_group_by_by_string_type_as_list():
df = DataFrame(
{
"a": NumPyTensorColumn([1, 2, 2, 1, 3, 2, 3]),
"name": NumPyTensorColumn(
np.array(["a", "b", "a", "c", "b", "d", "d"], dtype=str)
),
"b": NumPyTensorColumn([1, 2, 3, 4, 5, 6, 7]),
"c": NumPyTensorColumn([1.0, 3.2, 2.1, 4.3, 5.4, 6.5, 7.6]),
}
)
df = groupby(df, ["name"])
out = df["b"].mean()
assertNumpyArrayEquality(out["b"].data, np.array([2, 3.5, 4, 6.5]))
assert (out["name"].data == NumPyTensorColumn(["a", "b", "c", "d"]).data).all()
def test_group_by_by_string_type_as_list_key_as_list():
df = DataFrame(
{
"a": NumPyTensorColumn([1, 2, 2, 1, 3, 2, 3]),
"name": NumPyTensorColumn(
np.array(["a", "b", "a", "c", "b", "d", "d"], dtype=str)
),
"b": NumPyTensorColumn([1, 2, 3, 4, 5, 6, 7]),
"c": NumPyTensorColumn([1.0, 3.2, 2.1, 4.3, 5.4, 6.5, 7.6]),
}
)
df = groupby(df, ["name"])
out = df[["b"]].mean()
assertNumpyArrayEquality(out["b"].data, np.array([2, 3.5, 4, 6.5]))
assert (out["name"].data == NumPyTensorColumn(["a", "b", "c", "d"]).data).all()
def test_group_by_float_should_fail():
df = DataFrame(
{
"a": NumPyTensorColumn([1, 2, 2, 1, 3, 2, 3]),
"name": NumPyTensorColumn(
np.array(["a", "b", "a", "c", "b", "d", "d"], dtype=str)
),
"b": NumPyTensorColumn([1, 2, 3, 4, 5, 6, 7]),
"c": NumPyTensorColumn([1.0, 3.2, 2.1, 4.3, 5.4, 6.5, 7.6]),
}
)
    with pytest.raises(Exception):
        groupby(df, ["c"])
def test_group_by_float_should_fail_nonexistent_column():
df = DataFrame(
{
"a": NumPyTensorColumn([1, 2, 2, 1, 3, 2, 3]),
"name": NumPyTensorColumn(
np.array(["a", "b", "a", "c", "b", "d", "d"], dtype=str)
),
"b": NumPyTensorColumn([1, 2, 3, 4, 5, 6, 7]),
"c": NumPyTensorColumn([1.0, 3.2, 2.1, 4.3, 5.4, 6.5, 7.6]),
}
)
    with pytest.raises(Exception):
        groupby(df, ["d"])
def test_group_by_by_string_type_as_list_key_as_list_mult_key_by_name():
df = DataFrame(
{
"a": NumPyTensorColumn([1, 2, 2, 1, 3, 2, 3]),
"name": NumPyTensorColumn(
np.array(["a", "b", "a", "c", "b", "d", "d"], dtype=str)
),
"b": NumPyTensorColumn([1, 2, 3, 4, 5, 6, 7]),
"c": NumPyTensorColumn([1.0, 3.2, 2.1, 4.3, 5.4, 6.5, 7.6]),
}
)
df = groupby(df, ["name"])
out = df[["b", "c"]].mean()
    assert np.linalg.norm(out["c"].data - np.array([1.55, 4.3, 4.3, 7.05])) < 1e-10
assertNumpyArrayEquality(out["b"].data, np.array([2, 3.5, 4, 6.5]))
assert (out["name"].data == NumPyTensorColumn(["a", "b", "c", "d"]).data).all()
def test_group_by_by_string_type_as_list_key_as_list_mult_key():
df = DataFrame(
{
"a": NumPyTensorColumn([1, 2, 2, 1, 3, 2, 3]),
"a_diff": NumPyTensorColumn([1, 2, 2, 2, 3, 2, 3]),
"name": NumPyTensorColumn(
np.array(["a", "b", "a", "c", "b", "d", "d"], dtype=str)
),
"b": NumPyTensorColumn([1, 2, 3, 4, 5, 6, 7]),
"c": NumPyTensorColumn([1.0, 3.2, 2.1, 4.3, 5.4, 6.5, 7.6]),
}
)
df = groupby(df, ["a", "a_diff"])
out = df[["b"]].mean()
assertNumpyArrayEquality(out["a"].data, np.array([1, 1, 2, 3]))
assertNumpyArrayEquality(out["a_diff"].data, np.array([1, 2, 2, 3]))
assertNumpyArrayEquality(out["b"].data, np.array([1, 4, 11.0 / 3.0, 6]))
def test_group_by_by_string_type_as_list_key_as_list_mult_key_tensor():
df = DataFrame(
{
"a": NumPyTensorColumn([1, 2, 2, 1, 3, 2, 3]),
"a_diff": NumPyTensorColumn([1, 2, 2, 2, 3, 2, 3]),
"name": NumPyTensorColumn(
np.array(["a", "b", "a", "c", "b", "d", "d"], dtype=str)
),
"b": NumPyTensorColumn([1, 2, 3, 4, 5, 6, 7]),
"c": NumPyTensorColumn([1.0, 3.2, 2.1, 4.3, 5.4, 6.5, 7.6]),
}
)
df = groupby(df, ["a", "a_diff"])
out = df[["b"]].mean()
assertNumpyArrayEquality(out["a"].data, np.array([1, 1, 2, 3]))
assertNumpyArrayEquality(out["a_diff"].data, np.array([1, 2, 2, 3]))
assertNumpyArrayEquality(out["b"].data, np.array([1, 4, 11.0 / 3.0, 6]))
def test_simple_list_column():
df = DataFrame(
{
"a": ObjectColumn([1, 2, 2, 1, 3, 2, 3]),
"a_diff": NumPyTensorColumn([1, 2, 2, 2, 3, 2, 3]),
"name": NumPyTensorColumn(
np.array(["a", "b", "a", "c", "b", "d", "d"], dtype=str)
),
"b": NumPyTensorColumn([1, 2, 3, 4, 5, 6, 7]),
"c": NumPyTensorColumn([1.0, 3.2, 2.1, 4.3, 5.4, 6.5, 7.6]),
}
)
df = groupby(df, ["a", "a_diff"])
out = df[["b"]].mean()
assertNumpyArrayEquality(out["a"].data, np.array([1, 1, 2, 3]))
assertNumpyArrayEquality(out["a_diff"].data, np.array([1, 2, 2, 3]))
assertNumpyArrayEquality(out["b"].data, np.array([1, 4, 11.0 / 3.0, 6]))
| meerkat-main | tests/meerkat/ops/sliceby/test_groupby.py |
from functools import wraps
from itertools import product
from typing import Any, Dict, List, Type, Union
import numpy as np
import pytest
def column_parametrize(
testbed_classes: List[Union[Type, Dict]],
config: dict = None,
single: bool = False,
):
params = [
c.get_params(config=config, single=single) if isinstance(c, type) else c
for c in testbed_classes
]
return {
"params": sum([p["argvalues"] for p in params], []),
"ids": sum([p["ids"] for p in params], []),
}
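# A sketch of the merged parametrization (values hypothetical): for a testbed
# whose DEFAULT_CONFIG is {"dtype": ["float", "int"]}, get_params() yields
# argvalues like [(TestBed, {"dtype": "float"}), (TestBed, {"dtype": "int"})];
# column_parametrize concatenates these across all given testbeds into
# {"params": [...], "ids": [...]}, ready to splat into @pytest.fixture(**...).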
@pytest.fixture
def testbed(request, tmpdir):
testbed_class, config = request.param
return testbed_class(**config, tmpdir=tmpdir)
def obj_to_id(obj: Any):
return str(obj)
class AbstractColumnTestBed:
DEFAULT_CONFIG = {}
# subclasses can add pytest marks which will be applied to all
# tests using the testbed
marks: pytest.Mark = None
@classmethod
def get_params(
cls, config: dict = None, params: dict = None, single: bool = False
) -> Dict[str, Any]:
updated_config = cls.DEFAULT_CONFIG.copy()
if config is not None:
updated_config.update(config)
configs = [
(cls, config)
if cls.marks is None
else pytest.param((cls, config), marks=cls.marks)
for config in map(
dict,
product(*[[(k, v) for v in vs] for k, vs in updated_config.items()]),
)
]
if single:
configs = configs[:1]
if params is None:
return {
"argnames": "testbed",
"argvalues": configs,
"ids": [str(config) for config in configs],
}
else:
argvalues = list(product(configs, *params.values()))
return {
"argnames": "testbed," + ",".join(params.keys()),
"argvalues": argvalues,
"ids": [",".join(map(str, values)) for values in argvalues],
}
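    # e.g. (a sketch): DEFAULT_CONFIG = {"num_dims": [1, 2], "dtype": ["int"]}
    # expands via the cartesian product into configs
    # [{"num_dims": 1, "dtype": "int"}, {"num_dims": 2, "dtype": "int"}],
    # each paired with the testbed class (and wrapped in pytest.param when
    # `marks` is set on the subclass).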
@classmethod
@wraps(pytest.mark.parametrize)
def parametrize(
cls,
config: dict = None,
params: dict = None,
single: bool = False,
):
return pytest.mark.parametrize(
**cls.get_params(config=config, single=single), indirect=["testbed"]
)
@classmethod
def single(cls, tmpdir):
return cls(**cls.get_params(single=True)["argvalues"][0][1], tmpdir=tmpdir)
def get_map_spec(self, key: str = "default"):
raise NotImplementedError()
def get_data(self, index):
raise NotImplementedError()
def get_data_to_set(self, index):
# only mutable columns need implement this
pass
@staticmethod
def assert_data_equal(data1: np.ndarray, data2: np.ndarray):
raise NotImplementedError()
| meerkat-main | tests/meerkat/columns/abstract.py |
import os
import numpy as np
import pandas as pd
import pytest
from meerkat import DeferredColumn, NumPyTensorColumn, TorchTensorColumn
from meerkat.columns.deferred.base import DeferredCell
from meerkat.columns.scalar.pandas import PandasScalarColumn
from meerkat.errors import ConversionError, ImmutableError
from ...utils import product_parametrize
from .abstract import AbstractColumnTestBed, column_parametrize
from .deferred.test_deferred import DeferredColumnTestBed
from .deferred.test_image import ImageColumnTestBed
from .scalar.test_arrow import ArrowScalarColumnTestBed
from .scalar.test_pandas import PandasScalarColumnTestBed
from .tensor.test_numpy import NumPyTensorColumnTestBed
from .tensor.test_torch import TorchTensorColumnTestBed
@pytest.fixture(
**column_parametrize(
[
NumPyTensorColumnTestBed,
PandasScalarColumnTestBed,
TorchTensorColumnTestBed,
DeferredColumnTestBed,
ArrowScalarColumnTestBed,
# FileColumnTestBed,
ImageColumnTestBed,
# AudioColumnTestBed,
]
)
)
def column_testbed(request, tmpdir):
testbed_class, config = request.param
return testbed_class(**config, tmpdir=tmpdir)
@pytest.fixture(
**column_parametrize(
[
NumPyTensorColumnTestBed,
PandasScalarColumnTestBed,
TorchTensorColumnTestBed,
DeferredColumnTestBed,
ArrowScalarColumnTestBed,
],
single=True,
),
)
def single_column_testbed(request, tmpdir):
testbed_class, config = request.param
return testbed_class(**config, tmpdir=tmpdir)
@product_parametrize(params={"index_type": [np.array, list]})
def test_getitem(column_testbed, index_type: type):
col = column_testbed.col
result = col[1]
if isinstance(result, DeferredCell):
result = result()
column_testbed.assert_data_equal(column_testbed.get_data(1), result)
for index in [
slice(2, 4, 1),
(np.arange(len(col)) % 2).astype(bool),
np.array([0, 3, 5, 6]),
]:
col_index = index_type(index) if not isinstance(index, slice) else index
data = column_testbed.get_data(index)
result = col[col_index]
if isinstance(result, DeferredColumn):
result = result()
column_testbed.assert_data_equal(data, result.data)
if type(result) == type(col):
# if the getitem returns a column of the same type, enforce that all the
# attributes were cloned over appropriately. We don't want to check this
# for columns that return columns of different type from getitem
# (e.g. LambdaColumn)
assert col._clone(data=data).is_equal(result)
@product_parametrize(params={"index_type": [np.array, list, pd.Series]})
def test_set_item(column_testbed, index_type: type):
MUTABLE_COLUMNS = (NumPyTensorColumn, TorchTensorColumn, PandasScalarColumn)
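    # Only columns backed by in-place-mutable stores support __setitem__;
    # every other column type is expected to raise ImmutableError below.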
col = column_testbed.col
for index in [
1,
slice(2, 4, 1),
(np.arange(len(col)) % 2).astype(bool),
np.array([0, 3, 5, 6]),
]:
col_index = index_type(index) if isinstance(index, np.ndarray) else index
data_to_set = column_testbed.get_data_to_set(index)
if isinstance(col, MUTABLE_COLUMNS):
col[col_index] = data_to_set
if isinstance(index, int):
column_testbed.assert_data_equal(data_to_set, col[col_index])
else:
column_testbed.assert_data_equal(data_to_set, col[col_index].data)
else:
with pytest.raises(ImmutableError):
col[col_index] = data_to_set
def test_copy(column_testbed: AbstractColumnTestBed):
col, _ = column_testbed.col, column_testbed.data
col_copy = col.copy()
assert isinstance(col_copy, type(col))
assert col.is_equal(col_copy)
@pytest.mark.skip
def test_pickle(column_testbed):
import dill as pickle # needed so that it works with lambda functions
# important for dataloader
col = column_testbed.col
buf = pickle.dumps(col)
new_col = pickle.loads(buf)
assert isinstance(new_col, type(col))
if isinstance(new_col, DeferredColumn):
# the lambda function isn't exactly the same after reading
new_col.data.fn = col.data.fn
assert col.is_equal(new_col)
def test_io(tmp_path, column_testbed: AbstractColumnTestBed):
# uses the tmp_path fixture which will provide a
# temporary directory unique to the test invocation,
# important for dataloader
col, _ = column_testbed.col, column_testbed.data
path = os.path.join(tmp_path, "test")
col.write(path)
new_col = type(col).read(path)
assert isinstance(new_col, type(col))
if isinstance(new_col, DeferredColumn):
# the lambda function isn't exactly the same after reading
new_col.data.fn = col.data.fn
assert col.is_equal(new_col)
def test_head(single_column_testbed: AbstractColumnTestBed):
testbed = single_column_testbed
length = 10
result = testbed.col.head(length)
assert len(result) == length
assert result.is_equal(testbed.col[:length])
def test_tail(single_column_testbed: AbstractColumnTestBed):
testbed = single_column_testbed
length = 10
result = testbed.col.tail(length)
assert len(result) == length
assert result.is_equal(testbed.col[-length:])
def test_repr_html(single_column_testbed: AbstractColumnTestBed):
testbed = single_column_testbed
testbed.col._repr_html_()
def test_str(single_column_testbed: AbstractColumnTestBed):
testbed = single_column_testbed
result = str(testbed.col)
assert isinstance(result, str)
def test_repr(single_column_testbed: AbstractColumnTestBed):
testbed = single_column_testbed
result = repr(testbed.col)
assert isinstance(result, str)
def test_streamlit(single_column_testbed: AbstractColumnTestBed):
testbed = single_column_testbed
testbed.col.streamlit()
def test_repr_pandas(single_column_testbed: AbstractColumnTestBed):
testbed = single_column_testbed
series, _ = testbed.col._repr_pandas_()
assert isinstance(series, pd.Series)
def test_to_pandas(single_column_testbed: AbstractColumnTestBed):
testbed = single_column_testbed
if isinstance(testbed.col, DeferredColumn):
with pytest.raises(ConversionError):
series = testbed.col.to_pandas()
else:
series = testbed.col.to_pandas()
assert isinstance(series, pd.Series)
def test_to_torch(single_column_testbed: AbstractColumnTestBed):
pass
| meerkat-main | tests/meerkat/columns/test_common.py |
| meerkat-main | tests/meerkat/columns/__init__.py |
"""Unittests for NumpyColumn."""
from __future__ import annotations
import os
from typing import List, Union
import numpy as np
import pytest
import torch
import torchaudio
from meerkat import AudioColumn
from meerkat.columns.abstract import Column
from meerkat.columns.deferred.base import DeferredCell
from meerkat.columns.deferred.file import FileCell
from meerkat.columns.scalar import ScalarColumn
from ..abstract import AbstractColumnTestBed
def simple_transform(audio):
return 2 * audio
def loader(filepath):
return torchaudio.load(filepath)[0]
class AudioColumnTestBed(AbstractColumnTestBed):
DEFAULT_CONFIG = {
"transform": [True, False],
"use_base_dir": [True, False],
}
marks = pytest.mark.audio_col
def __init__(
self,
tmpdir: str,
length: int = 16,
transform: bool = False,
use_base_dir: bool = False,
seed: int = 123,
):
self.audio_paths = []
self.audio_arrays = []
self.data = []
transform = simple_transform if transform else None
self.base_dir = tmpdir if use_base_dir else None
for i in range(0, length):
# we want the audio to be variable length to test the collate
audio = torch.tensor(
(1 / (i + 1)) * np.ones((1, 16 + i)).astype(np.float32)
)
self.audio_arrays.append(audio)
self.data.append(transform(audio) if transform else audio)
filename = "{}.wav".format(i)
torchaudio.save(
os.path.join(tmpdir, filename),
audio,
sample_rate=16,
)
if use_base_dir:
self.audio_paths.append(filename)
else:
self.audio_paths.append(os.path.join(tmpdir, filename))
self.transform = transform
self.col = AudioColumn.from_filepaths(
self.audio_paths,
transform=transform,
loader=loader,
base_dir=self.base_dir,
)
def get_data(self, index, materialize: bool = True):
if materialize:
if isinstance(index, int):
return self.data[index]
index = np.arange(len(self.data))[index]
return [self.data[idx] for idx in index]
else:
if isinstance(index, int):
return FileCell(
data=self.audio_paths[index],
loader=self.col.loader,
transform=self.col.transform,
base_dir=self.base_dir,
)
index = np.arange(len(self.data))[index]
return ScalarColumn([self.audio_paths[idx] for idx in index])
@staticmethod
def assert_data_equal(
data1: Union[Column, List, torch.Tensor],
data2: Union[Column, List, torch.Tensor],
):
def unpad_and_compare(padded: torch.Tensor, data: List):
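            # assumes a pad value of 0 and strictly nonzero waveform samples
            # (each testbed waveform is (1 / (i + 1)) * ones, so this holds)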
for row_idx in range(padded.shape[0]):
padded_row = padded[row_idx]
unpadded_row = padded_row[padded_row != 0]
assert torch.allclose(unpadded_row, data[row_idx])
if isinstance(data1, Column) and isinstance(data2, Column):
assert data1.is_equal(data2)
elif torch.is_tensor(data1) and torch.is_tensor(data2):
assert torch.allclose(data1, data2)
elif torch.is_tensor(data1) and isinstance(data2, List):
# because the waveforms are of different lengths, collate will put them
# into a padded tensor, so we use unpad_and_compare to compare to the
# original unpadded data
unpad_and_compare(data1, data2)
elif torch.is_tensor(data2) and isinstance(data1, List):
unpad_and_compare(data2, data1)
elif isinstance(data1, DeferredCell):
assert data1 == data2
else:
raise ValueError(
"Cannot assert data equal between objects type:"
f" {type(data1), type(data2)}"
)
| meerkat-main | tests/meerkat/columns/deferred/test_audio.py |
"""Unittests for NumpyColumn."""
from __future__ import annotations
import os
from typing import List, Union
import numpy as np
import pandas as pd
import pytest
import torch
import torchvision.datasets.folder as folder
from PIL import Image
import meerkat
from meerkat import ImageColumn
from meerkat.block.deferred_block import DeferredCellOp, DeferredOp
from meerkat.columns.abstract import Column
from meerkat.columns.deferred.base import DeferredCell
from meerkat.columns.deferred.file import FileCell
from meerkat.columns.object.base import ObjectColumn
from meerkat.columns.scalar import ScalarColumn
from ....utils import product_parametrize
from ..abstract import AbstractColumnTestBed, column_parametrize
class ImageColumnTestBed(AbstractColumnTestBed):
DEFAULT_CONFIG = {
"use_base_dir": [True, False],
}
marks = pytest.mark.image_col
def __init__(
self,
tmpdir: str,
length: int = 16,
use_base_dir: bool = False,
seed: int = 123,
):
self.image_paths = []
self.image_arrays = []
self.ims = []
self.data = []
self.base_dir = tmpdir if use_base_dir else ""
for i in range(0, length):
self.image_arrays.append((i * np.ones((4, 4, 3))).astype(np.uint8))
im = Image.fromarray(self.image_arrays[-1])
self.ims.append(im)
self.data.append(im)
filename = "{}.png".format(i)
im.save(os.path.join(tmpdir, filename))
if use_base_dir:
self.image_paths.append(filename)
else:
self.image_paths.append(os.path.join(tmpdir, filename))
self.col = ImageColumn(
self.image_paths,
loader=folder.default_loader,
base_dir=self.base_dir,
)
def get_map_spec(
self,
batched: bool = True,
materialize: bool = False,
kwarg: int = 0,
salt: int = 1,
):
if not materialize:
if batched:
return {"fn": lambda x, k=0: x, "expected_result": self.col}
else:
                # can't check for cell column equivalence because the `fn` is a
                # bound method of different objects (we convert non-batched fns
                # to batched fns before mapping), so we call get
return {
"fn": lambda x, k=0: x.get().rotate(45 + salt + k),
"expected_result": ObjectColumn(
[im.rotate(45 + salt + kwarg) for im in self.ims]
),
}
else:
return {
"fn": (lambda x, k=0: [im.rotate(45 + salt + k) for im in x])
if batched
else (lambda x, k=0: x.rotate(45 + salt + k)),
"expected_result": ObjectColumn(
[im.rotate(45 + salt + kwarg) for im in self.ims]
),
}
def get_filter_spec(
self,
batched: bool = True,
materialize: bool = False,
salt: int = 1,
kwarg: int = 0,
):
if not materialize:
if batched:
return {
"fn": lambda x, k=0: [
int(os.path.splitext(os.path.basename(cell.data))[0])
< (4 + salt + k)
for cell in x
],
"expected_result": self.col[: 4 + salt + kwarg],
}
else:
return {
"fn": (
lambda x, k=0: int(
os.path.splitext(os.path.basename(x.data))[0]
)
< (4 + salt + k)
),
"expected_result": self.col[: 4 + salt + kwarg],
}
else:
return {
"fn": (lambda x, k=0: [im.rotate(45 + salt + k) for im in x])
if batched
else (lambda x, k=0: x.rotate(45 + salt + k)),
"expected_result": ObjectColumn(
[im.rotate(45 + salt + kwarg) for im in self.ims]
),
}
def get_data(self, index, materialize: bool = True):
if materialize:
if isinstance(index, int):
return self.data[index]
index = np.arange(len(self.data))[index]
return [self.data[idx] for idx in index]
else:
if isinstance(index, int):
return FileCell(
DeferredCellOp(
args=[self.image_paths[index]],
kwargs={},
fn=self.col.fn,
is_batched_fn=False,
return_index=None,
)
)
index = np.arange(len(self.data))[index]
col = ScalarColumn([self.image_paths[idx] for idx in index])
return DeferredOp(
args=[col], kwargs={}, fn=self.col.fn, is_batched_fn=False, batch_size=1
)
@staticmethod
def assert_data_equal(
data1: Union[Image.Image, Column, List, torch.Tensor],
data2: Union[Image.Image, Column, List, torch.Tensor],
):
if isinstance(data1, Image.Image) or isinstance(data1, List):
assert data1 == data2
elif isinstance(data1, Column):
assert data1.is_equal(data2)
        elif torch.is_tensor(data1):
            assert (data1 == data2).all()
elif isinstance(data1, DeferredCell):
assert data1 == data2
elif isinstance(data1, DeferredOp):
assert data1.is_equal(data2)
else:
raise ValueError(
"Cannot assert data equal between objects type:"
f" {type(data1), type(data2)}"
)
@pytest.fixture(**column_parametrize([ImageColumnTestBed]))
def testbed(request, tmpdir):
testbed_class, config = request.param
return testbed_class(**config, tmpdir=tmpdir)
@product_parametrize(params={"max_rows": [6, 16, 20]})
def test_repr_pandas(testbed, max_rows):
meerkat.config.display.max_rows = max_rows
series, _ = testbed.col._repr_pandas_()
assert isinstance(series, pd.Series)
assert len(series) == min(len(series), max_rows + 1)
def test_repr_when_transform_produces_invalid_image(testbed):
from torchvision.transforms import ToTensor
def mean_transform(cell):
return ToTensor()(cell).mean(dim=[1, 2])
testbed.col.transform = mean_transform
testbed.col._repr_html_()
| meerkat-main | tests/meerkat/columns/deferred/test_image.py |
| meerkat-main | tests/meerkat/columns/deferred/__init__.py |
import json
import os
from typing import Union
import dill
import numpy as np
import pytest
from PIL import Image
import meerkat as mk
from meerkat.block.deferred_block import DeferredCellOp, DeferredOp
from meerkat.columns.deferred.base import DeferredCell
from meerkat.columns.deferred.file import FILE_TYPES, FileCell, FileColumn, FileLoader
from meerkat.columns.scalar import ScalarColumn
from tests.meerkat.columns.abstract import AbstractColumnTestBed, column_parametrize
from tests.utils import product_parametrize
def load_json(path):
with open(path, "r") as f:
return json.load(f)
def add_one(data):
return data + 1
class FileColumnTestBed(AbstractColumnTestBed):
DEFAULT_CONFIG = {
"use_base_dir": [True, False],
}
marks = pytest.mark.file_col
def __init__(
self,
tmpdir: str,
length: int = 16,
use_base_dir: bool = False,
seed: int = 123,
):
self.paths = []
self.data = []
self.tmp_dir = tmpdir
self.files_dir = os.path.join(tmpdir, "files")
os.makedirs(self.files_dir, exist_ok=True)
self.base_dir = self.files_dir if use_base_dir else None
for i in range(0, length):
# write a simple json file to disk
filename = "file_{}.json".format(i)
path = os.path.join(self.files_dir, filename)
with open(path, "w") as f:
json.dump(i, f)
self.data.append(i)
if use_base_dir:
self.paths.append(filename)
else:
self.paths.append(os.path.join(self.files_dir, filename))
self.data = np.arange(length)
self.col = mk.files(
self.paths,
loader=load_json,
base_dir=self.base_dir,
)
def get_data(self, index, materialize: bool = True):
if materialize:
return self.data[index]
else:
if isinstance(index, int):
return FileCell(
DeferredCellOp(
args=[self.paths[index]],
kwargs={},
fn=self.col.fn,
is_batched_fn=False,
return_index=None,
)
)
index = np.arange(len(self.data))[index]
col = ScalarColumn([self.paths[idx] for idx in index])
return DeferredOp(
args=[col], kwargs={}, fn=self.col.fn, is_batched_fn=False, batch_size=1
)
@staticmethod
def assert_data_equal(
data1: Union[np.ndarray, DeferredCell, DeferredOp],
data2: Union[np.ndarray, DeferredCell, DeferredOp],
):
if isinstance(data1, (int, np.int64)):
assert data1 == data2
elif isinstance(data1, np.ndarray):
assert (data1 == data2).all()
elif isinstance(data1, DeferredCell):
assert data1 == data2
elif isinstance(data1, DeferredOp):
assert data1.is_equal(data2)
else:
raise ValueError(
"Cannot assert data equal between objects type:"
f" {type(data1), type(data2)}"
)
@pytest.fixture(**column_parametrize([FileColumnTestBed]))
def testbed(request, tmpdir):
testbed_class, config = request.param
return testbed_class(**config, tmpdir=tmpdir)
@FileColumnTestBed.parametrize(
config={
"use_base_dir": [True],
}
)
def test_change_base_dir(testbed):
assert testbed.base_dir is not None
col = testbed.col
new_dir = os.path.join(testbed.tmp_dir, "new_files")
os.rename(testbed.files_dir, new_dir)
col.base_dir = new_dir
assert (col[[1, 3, 5]]().data == testbed.get_data([1, 3, 5])).all()
BUCKET_URL = "https://storage.googleapis.com/meerkat-ml/tests/file_types"
TEST_FILES = {
"image": [
os.path.join(BUCKET_URL, "image/test-img-01.jpg"),
os.path.join(BUCKET_URL, "image/test-img-02.jpg"),
os.path.join(BUCKET_URL, "image/test-img-01.png"),
os.path.join(BUCKET_URL, "image/test-img-02.png"),
os.path.join(BUCKET_URL, "image/test-img-01.jpeg"),
os.path.join(BUCKET_URL, "image/test-img-02.jpeg"),
],
"pdf": [
os.path.join(BUCKET_URL, "pdf/test-pdf-01.pdf"),
os.path.join(BUCKET_URL, "pdf/test-pdf-02.pdf"),
os.path.join(BUCKET_URL, "pdf/test-pdf-03.pdf"),
],
"text": [
os.path.join(BUCKET_URL, "text/test-txt-01.txt"),
os.path.join(BUCKET_URL, "text/test-txt-02.txt"),
os.path.join(BUCKET_URL, "text/test-txt-03.txt"),
],
"html": [
os.path.join(BUCKET_URL, "html/test-html-01.html"),
os.path.join(BUCKET_URL, "html/test-html-02.html"),
os.path.join(BUCKET_URL, "html/test-html-03.html"),
],
"code": [
os.path.join(BUCKET_URL, "code/test-code-01.py"),
os.path.join(BUCKET_URL, "code/test-code-02.py"),
os.path.join(BUCKET_URL, "code/test-code-03.py"),
],
}
@product_parametrize({"file_type": list(TEST_FILES.keys())})
def test_file_types(file_type: str, tmpdir):
files = TEST_FILES[file_type]
col = mk.files(files)
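    # mk.files picks a formatter group per file type (presumably keyed on the
    # file extension), which the isinstance check below verifies against
    # FILE_TYPES.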
assert isinstance(col, FileColumn)
assert isinstance(col.formatters, FILE_TYPES[file_type]["formatters"])
col.formatters["base"].encode(col[0])
def test_downloader(monkeypatch, tmpdir):
import urllib
ims = []
def patched_urlretrieve(url, filename):
img_array = np.ones((4, 4, 3)).astype(np.uint8)
im = Image.fromarray(img_array)
ims.append(im)
im.save(filename)
monkeypatch.setattr(urllib.request, "urlretrieve", patched_urlretrieve)
downloader = FileLoader(
loader=Image.open, downloader="url", cache_dir=os.path.join(tmpdir, "cache")
)
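    # The first call for a URL downloads it and caches it under cache_dir
    # (mirroring the URL path); repeat calls are served from the cache, so
    # `ims` below only grows when a previously unseen URL is requested.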
out = downloader("https://test.com/dir/2.jpg")
assert os.path.exists(os.path.join(tmpdir, "cache", "test.com/dir/2.jpg"))
assert (np.array(out) == np.array(ims[0])).all()
out = downloader("https://test.com/dir/2.jpg")
assert len(ims) == 1
out = downloader("https://test.com/dir/3.jpg")
assert len(ims) == 2
def test_fallback_download(monkeypatch, tmpdir):
import urllib
def patched_urlretrieve(url, filename):
raise urllib.error.HTTPError(url, 404, "Not found", None, None)
monkeypatch.setattr(urllib.request, "urlretrieve", patched_urlretrieve)
ims = []
def fallback(filename):
img_array = np.ones((4, 4, 3)).astype(np.uint8)
im = Image.fromarray(img_array)
ims.append(im)
im.save(filename)
downloader = FileLoader(
loader=Image.open,
downloader="url",
fallback_downloader=fallback,
cache_dir=os.path.join(tmpdir, "cache"),
)
with pytest.warns(UserWarning):
out = downloader("https://test.com/dir/2.jpg")
assert os.path.exists(os.path.join(tmpdir, "cache", "test.com/dir/2.jpg"))
assert (np.array(out) == np.array(ims[0])).all()
out = downloader("https://test.com/dir/2.jpg")
assert len(ims) == 1
with pytest.warns(UserWarning):
out = downloader("https://test.com/dir/3.jpg")
assert len(ims) == 2
def test_serialize_downloader(tmpdir):
downloader = FileLoader(
loader=Image.open,
downloader="url",
cache_dir=os.path.join(tmpdir, "cache"),
)
dill.dump(downloader, open(os.path.join(tmpdir, "downloader.pkl"), "wb"))
downloader = dill.load(open(os.path.join(tmpdir, "downloader.pkl"), "rb"))
assert downloader.cache_dir == os.path.join(tmpdir, "cache")
| meerkat-main | tests/meerkat/columns/deferred/test_file_column.py |
import json
import os
from typing import Union
import dill
import numpy as np
import pytest
from PIL import Image
from meerkat.block.deferred_block import DeferredCellOp, DeferredOp
from meerkat.columns.deferred.base import DeferredCell
from meerkat.columns.deferred.file import FileCell, FileColumn, FileLoader
from meerkat.columns.scalar import ScalarColumn
from tests.meerkat.columns.abstract import AbstractColumnTestBed, column_parametrize
def load_json(path):
with open(path, "r") as f:
return json.load(f)
def add_one(data):
return data + 1
class FileColumnTestBed(AbstractColumnTestBed):
DEFAULT_CONFIG = {
"use_base_dir": [True, False],
}
marks = pytest.mark.file_col
def __init__(
self,
tmpdir: str,
length: int = 16,
use_base_dir: bool = False,
seed: int = 123,
):
self.paths = []
self.data = []
self.tmp_dir = tmpdir
self.files_dir = os.path.join(tmpdir, "files")
os.makedirs(self.files_dir, exist_ok=True)
self.base_dir = self.files_dir if use_base_dir else None
for i in range(0, length):
# write a simple json file to disk
filename = "file_{}.json".format(i)
path = os.path.join(self.files_dir, filename)
with open(path, "w") as f:
json.dump(i, f)
self.data.append(i)
if use_base_dir:
self.paths.append(filename)
else:
self.paths.append(os.path.join(self.files_dir, filename))
self.data = np.arange(length)
self.col = FileColumn(
self.paths,
loader=load_json,
base_dir=self.base_dir,
)
def get_data(self, index, materialize: bool = True):
if materialize:
return self.data[index]
else:
if isinstance(index, int):
return FileCell(
DeferredCellOp(
args=[self.paths[index]],
kwargs={},
fn=self.col.fn,
is_batched_fn=False,
return_index=None,
)
)
index = np.arange(len(self.data))[index]
col = ScalarColumn([self.paths[idx] for idx in index])
return DeferredOp(
args=[col], kwargs={}, fn=self.col.fn, is_batched_fn=False, batch_size=1
)
@staticmethod
def assert_data_equal(
data1: Union[np.ndarray, DeferredCell, DeferredOp],
data2: Union[np.ndarray, DeferredCell, DeferredOp],
):
if isinstance(data1, (int, np.int64)):
assert data1 == data2
elif isinstance(data1, np.ndarray):
assert (data1 == data2).all()
elif isinstance(data1, DeferredCell):
assert data1 == data2
elif isinstance(data1, DeferredOp):
assert data1.is_equal(data2)
else:
raise ValueError(
"Cannot assert data equal between objects type:"
f" {type(data1), type(data2)}"
)
@pytest.fixture(**column_parametrize([FileColumnTestBed]))
def testbed(request, tmpdir):
testbed_class, config = request.param
return testbed_class(**config, tmpdir=tmpdir)
@FileColumnTestBed.parametrize(
config={
"use_base_dir": [True],
}
)
def test_change_base_dir(testbed):
assert testbed.base_dir is not None
col = testbed.col
new_dir = os.path.join(testbed.tmp_dir, "new_files")
os.rename(testbed.files_dir, new_dir)
col.base_dir = new_dir
assert (col[[1, 3, 5]]().values == testbed.get_data([1, 3, 5])).all()
def test_downloader(monkeypatch, tmpdir):
import urllib
ims = []
def patched_urlretrieve(url, filename):
img_array = np.ones((4, 4, 3)).astype(np.uint8)
im = Image.fromarray(img_array)
ims.append(im)
im.save(filename)
monkeypatch.setattr(urllib.request, "urlretrieve", patched_urlretrieve)
downloader = FileLoader(
loader=Image.open, downloader="url", cache_dir=os.path.join(tmpdir, "cache")
)
out = downloader("https://test.com/dir/2.jpg")
assert os.path.exists(os.path.join(tmpdir, "cache", "test.com/dir/2.jpg"))
assert (np.array(out) == np.array(ims[0])).all()
out = downloader("https://test.com/dir/2.jpg")
assert len(ims) == 1
out = downloader("https://test.com/dir/3.jpg")
assert len(ims) == 2
def test_fallback_download(monkeypatch, tmpdir):
import urllib
def patched_urlretrieve(url, filename):
raise urllib.error.HTTPError(url, 404, "Not found", None, None)
monkeypatch.setattr(urllib.request, "urlretrieve", patched_urlretrieve)
ims = []
def fallback(filename):
img_array = np.ones((4, 4, 3)).astype(np.uint8)
im = Image.fromarray(img_array)
ims.append(im)
im.save(filename)
downloader = FileLoader(
loader=Image.open,
downloader="url",
fallback_downloader=fallback,
cache_dir=os.path.join(tmpdir, "cache"),
)
with pytest.warns(UserWarning):
out = downloader("https://test.com/dir/2.jpg")
assert os.path.exists(os.path.join(tmpdir, "cache", "test.com/dir/2.jpg"))
assert (np.array(out) == np.array(ims[0])).all()
out = downloader("https://test.com/dir/2.jpg")
assert len(ims) == 1
with pytest.warns(UserWarning):
out = downloader("https://test.com/dir/3.jpg")
assert len(ims) == 2
def test_serialize_downloader(tmpdir):
downloader = FileLoader(
loader=Image.open,
downloader="url",
cache_dir=os.path.join(tmpdir, "cache"),
)
dill.dump(downloader, open(os.path.join(tmpdir, "downloader.pkl"), "wb"))
downloader = dill.load(open(os.path.join(tmpdir, "downloader.pkl"), "rb"))
assert downloader.cache_dir == os.path.join(tmpdir, "cache")
| meerkat-main | tests/meerkat/columns/deferred/test_file.py |
"""Unittests for LambdaColumn."""
from typing import Type
import numpy as np
import pytest
import meerkat as mk
from meerkat import DeferredColumn, NumPyTensorColumn, ObjectColumn
from meerkat.errors import ConcatWarning
from ....testbeds import MockColumn, MockDatapanel
from ..abstract import AbstractColumnTestBed, column_parametrize
class DeferredColumnTestBed(AbstractColumnTestBed):
DEFAULT_CONFIG = {
"batched": [True, False],
"from_df": [True, False],
"multiple_outputs": [True, False],
}
marks = pytest.mark.lambda_col
def __init__(
self,
batched: bool,
from_df: bool,
multiple_outputs: bool,
length: int = 16,
seed: int = 123,
tmpdir: str = None,
):
defer_kwargs = {
"is_batched_fn": batched,
"batch_size": 4 if batched else 1,
}
np.random.seed(seed)
array = np.random.random(length) * 10
self.col = mk.NumPyTensorColumn(array).defer(
function=lambda x: x + 2, **defer_kwargs
)
self.data = array + 2
def get_map_spec(
self,
batched: bool = True,
materialize: bool = False,
kwarg: int = 0,
salt: int = 1,
):
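        # With materialize=True the mapped fn receives realized values; with
        # materialize=False it receives DeferredCell objects (or batches of
        # them) and must call .get() itself, as the branches below show.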
if materialize:
return {
"fn": lambda x, k=0: x + salt + k,
"expected_result": NumPyTensorColumn.from_array(
self.data + salt + kwarg
),
}
else:
if batched:
return {
"fn": lambda x, k=0: np.array([cell.get() for cell in x])
+ salt
+ k,
"expected_result": NumPyTensorColumn.from_array(
self.data + salt + kwarg
),
}
else:
return {
"fn": lambda x, k=0: x.get() + salt + k,
"expected_result": NumPyTensorColumn.from_array(
self.data + salt + kwarg
),
}
def get_data(self, index, materialize: bool = True):
if materialize:
return self.data[index]
else:
raise NotImplementedError()
def get_data_to_set(self, index):
return 0
@staticmethod
def assert_data_equal(data1: np.ndarray, data2: np.ndarray):
if isinstance(data1, np.ndarray):
assert (data1 == data2).all()
else:
assert data1 == data2
@pytest.fixture(**column_parametrize([DeferredColumnTestBed]))
def testbed(request, tmpdir):
testbed_class, config = request.param
return testbed_class(**config, tmpdir=tmpdir)
@pytest.mark.parametrize(
    "col_type", [NumPyTensorColumn, ObjectColumn]
)
def test_column_defer(col_type: Type):
testbed = MockColumn(col_type=col_type)
col = testbed.col
# Build a dataset from a batch
lambda_col = col.defer(lambda x: x + 1)
assert isinstance(lambda_col, DeferredColumn)
assert (lambda_col() == testbed.array[testbed.visible_rows] + 1).all()
@pytest.mark.parametrize(
"use_visible_columns",
[True, False],
)
def test_df_defer(use_visible_columns: bool):
length = 16
testbed = MockDatapanel(
use_visible_columns=use_visible_columns,
length=length,
)
df = testbed.df
# Build a dataset from a batch
lambda_col = df.defer(lambda x: x["a"] + 1)
assert isinstance(lambda_col, DeferredColumn)
assert (lambda_col().data == np.arange(length)[testbed.visible_rows] + 1).all()
@pytest.mark.parametrize(
    "col_type",
    [NumPyTensorColumn, ObjectColumn],
)
def test_composed_lambda_columns(col_type: Type):
testbed = MockColumn(col_type=col_type)
# Build a dataset from a batch
lambda_col = testbed.col.defer(lambda x: x + 1)
lambda_col = lambda_col.defer(lambda x: x + 1)
assert (lambda_col() == testbed.array[testbed.visible_rows] + 2).all()
def test_df_concat():
length = 16
testbed = MockDatapanel(length=length)
df = testbed.df
def fn(x):
return x["a"] + 1
col_a = df.defer(fn)
col_b = df.defer(fn)
out = mk.concat([col_a, col_b])
assert isinstance(out, DeferredColumn)
assert (out().data == np.concatenate([np.arange(length) + 1] * 2)).all()
col_a = df.defer(fn)
col_b = df.defer(lambda x: x["a"])
with pytest.warns(ConcatWarning):
out = mk.concat([col_a, col_b])
@pytest.mark.parametrize("col_type", [NumPyTensorColumn, ObjectColumn])
def test_col_concat(col_type):
testbed = MockColumn(col_type=col_type)
col = testbed.col
length = len(col)
def fn(x):
return x + 1
col_a = col.defer(fn)
col_b = col.defer(fn)
out = mk.concat([col_a, col_b])
assert isinstance(out, DeferredColumn)
assert (out().data == np.concatenate([np.arange(length) + 1] * 2)).all()
col_a = col.defer(fn)
col_b = col.defer(lambda x: x)
with pytest.warns(ConcatWarning):
out = mk.concat([col_a, col_b])
| meerkat-main | tests/meerkat/columns/deferred/test_deferred.py |
| meerkat-main | tests/meerkat/columns/object/__init__.py |
import time
import numpy as np
from PIL import Image
from meerkat.columns.object.base import ObjectColumn
from meerkat.interactive.formatter.image import ImageFormatterGroup
def test_formatters_image():
"""Test formatters when the object column is full of images."""
images = [
Image.fromarray(np.random.randint(0, 255, (100, 100, 3), dtype=np.uint8))
for _ in range(10)
]
# Automatically detect the formatters.
col = ObjectColumn(images)
assert isinstance(col.formatters, ImageFormatterGroup)
# make sure the formatters do not modify the object in place.
for key in col.formatters.keys():
size = images[0].size
col.formatters[key].encode(images[0])
assert images[0].size == size
def test_to_numpy_speed():
"""Casting an object column to numpy should be fast (< 1 second)."""
objs = [[0, 0, 0, 0] for _ in range(10000)]
col = ObjectColumn(objs)
start = time.perf_counter()
col.to_numpy()
time_elapsed = time.perf_counter() - start
assert time_elapsed < 1.0
start = time.perf_counter()
np.array(col)
time_elapsed = time.perf_counter() - start
assert time_elapsed < 1.0
| meerkat-main | tests/meerkat/columns/object/test_base.py |
import numpy as np
import pandas as pd
import pytest
import torch
from meerkat import TensorColumn, TorchTensorColumn
from meerkat.block.numpy_block import NumPyBlock
from ..abstract import AbstractColumnTestBed, column_parametrize
class TensorColumnTestBed(AbstractColumnTestBed):
DEFAULT_CONFIG = {
"num_dims": [1, 2, 3],
"dim_length": [1, 5],
"dtype": ["float", "int"],
}
marks = pytest.mark.tensor_col
def __init__(
self,
length: int = 16,
        num_dims: int = 1,
dim_length: int = 5,
dtype="float",
seed: int = 123,
tmpdir: str = None,
):
self.dtype = dtype
np.random.seed(seed)
array = (
np.random.random((length, *[dim_length for _ in range(num_dims - 1)])) * 10
)
array = torch.tensor(array).to({"int": torch.int, "float": torch.float}[dtype])
self.col = TorchTensorColumn(array)
self.data = array
def get_map_spec(
self,
batched: bool = True,
materialize: bool = False,
salt: int = 1,
kwarg: int = 0,
):
return {
"fn": lambda x, k=0: x + salt + k,
"expected_result": TorchTensorColumn(self.col.data + salt + kwarg),
}
def get_filter_spec(
self,
batched: bool = True,
materialize: bool = False,
salt: int = 1,
kwarg: int = 0,
):
return {
"fn": lambda x, k=0: (
(x > 3 + salt + k).to(dtype=bool) if batched else (x > 3 + salt + k)
),
"expected_result": self.col[self.col.data > 3 + salt + kwarg],
}
def get_data(self, index, materialize=True):
return self.data[index]
def get_data_to_set(self, data_index):
return torch.zeros_like(self.get_data(data_index))
@staticmethod
def assert_data_equal(data1: np.ndarray, data2: np.ndarray):
assert (data1 == data2).all()
@pytest.fixture(**column_parametrize([TensorColumnTestBed]))
def testbed(request, tmpdir):
testbed_class, config = request.param
return testbed_class(**config, tmpdir=tmpdir)
def test_init_block():
block_view = NumPyBlock(np.zeros((10, 10)))[0]
with pytest.raises(ValueError):
TorchTensorColumn(block_view)
def test_to_tensor(testbed):
col, _ = testbed.col, testbed.data
tensor = col.to_tensor()
assert torch.is_tensor(tensor)
assert (col == tensor.numpy()).all()
def test_to_pandas(testbed):
series = testbed.col.to_pandas()
assert isinstance(series, pd.Series)
    if len(testbed.col.shape) == 1:
assert (series.values == testbed.col.data).all()
else:
for idx in range(len(testbed.col)):
assert (series.iloc[idx] == testbed.col[idx].numpy()).all()
def test_repr_pandas(testbed):
series = testbed.col.to_pandas()
assert isinstance(series, pd.Series)
def test_ufunc_unhandled():
a = TorchTensorColumn([1, 2, 3])
with pytest.raises(TypeError):
a == "a"
@pytest.mark.parametrize(
"data",
[[1, 2, 3], np.asarray([1, 2, 3]), torch.tensor([1, 2, 3]), pd.Series([1, 2, 3])],
)
@pytest.mark.parametrize("backend", ["numpy", "torch"])
def test_backend(data, backend: str):
col = TensorColumn(data, backend=backend)
expected_type = {"numpy": np.ndarray, "torch": torch.Tensor}[backend]
assert isinstance(col.data, expected_type)
col_data = col.data
if isinstance(col_data, torch.Tensor):
col_data = col_data.numpy()
if isinstance(data, torch.Tensor):
data = data.numpy()
col_data = np.asarray(col_data)
data = np.asarray(data)
assert (col_data == data).all()
| meerkat-main | tests/meerkat/columns/tensor/test_tensor_column.py |
import numpy as np
import pandas as pd
import pytest
import torch
from meerkat import NumPyTensorColumn, TorchTensorColumn
from meerkat.block.torch_block import TorchBlock
from ..abstract import AbstractColumnTestBed, column_parametrize
class TorchTensorColumnTestBed(AbstractColumnTestBed):
DEFAULT_CONFIG = {
"num_dims": [1, 2, 3],
"dim_length": [1, 5],
"dtype": ["float", "int"],
}
marks = pytest.mark.tensor_col
def __init__(
self,
length: int = 16,
        num_dims: int = 1,
dim_length: int = 5,
dtype="float",
seed: int = 123,
tmpdir: str = None,
):
self.dtype = dtype
np.random.seed(seed)
array = (
np.random.random((length, *[dim_length for _ in range(num_dims - 1)])) * 10
)
array = torch.tensor(array).to({"int": torch.int, "float": torch.float}[dtype])
self.col = TorchTensorColumn(array)
self.data = array
def get_map_spec(
self,
batched: bool = True,
materialize: bool = False,
salt: int = 1,
kwarg: int = 0,
):
return {
"fn": lambda x, k=0: x + salt + k,
"expected_result": TorchTensorColumn(self.col.data + salt + kwarg),
}
def get_filter_spec(
self,
batched: bool = True,
materialize: bool = False,
salt: int = 1,
kwarg: int = 0,
):
return {
"fn": lambda x, k=0: (
(x > 3 + salt + k).to(dtype=bool) if batched else (x > 3 + salt + k)
),
"expected_result": self.col[self.col.data > 3 + salt + kwarg],
}
def get_data(self, index, materialize=True):
return self.data[index]
def get_data_to_set(self, data_index):
return torch.zeros_like(self.get_data(data_index))
@staticmethod
    def assert_data_equal(data1: torch.Tensor, data2: torch.Tensor):
assert (data1 == data2).all()
@pytest.fixture(**column_parametrize([TorchTensorColumnTestBed]))
def testbed(request, tmpdir):
testbed_class, config = request.param
return testbed_class(**config, tmpdir=tmpdir)
def test_init_block():
block_view = TorchBlock(torch.zeros(10, 10))[0]
with pytest.raises(ValueError):
NumPyTensorColumn(block_view)
def test_to_tensor(testbed):
col, _ = testbed.col, testbed.data
tensor = col.to_tensor()
assert torch.is_tensor(tensor)
assert (col == tensor.numpy()).all()
def test_to_pandas(testbed):
series = testbed.col.to_pandas(allow_objects=True)
assert isinstance(series, pd.Series)
    if len(testbed.col.shape) == 1:
assert (series.values == testbed.col.data).all()
else:
for idx in range(len(testbed.col)):
assert (series.iloc[idx] == testbed.col[idx].numpy()).all()
@pytest.mark.parametrize(
"data",
[
pd.Series([1, 2, 3]),
[1, 2, 3],
[[1, 2, 3], [4, 5, 6]],
[np.asarray([1, 2, 3]), np.asarray([4, 5, 6])],
],
)
def test_from_numpyable(data):
"""Test that numpyable objects can also be converted to
TorchTensorColumn."""
col = TorchTensorColumn(data)
assert isinstance(col, TorchTensorColumn)
assert (col.data == torch.as_tensor(np.asarray(data))).all()
def test_repr_pandas(testbed):
series = testbed.col.to_pandas(allow_objects=True)
assert isinstance(series, pd.Series)
def test_ufunc_unhandled():
a = TorchTensorColumn([1, 2, 3])
with pytest.raises(TypeError):
a == "a"
| meerkat-main | tests/meerkat/columns/tensor/test_torch.py |
| meerkat-main | tests/meerkat/columns/tensor/__init__.py |
import os
import numpy as np
import numpy.testing as np_test
import pandas as pd
import pytest
from numpy.lib.format import open_memmap
from meerkat import NumPyTensorColumn, TorchTensorColumn
from meerkat.block.numpy_block import NumPyBlock
from ....utils import product_parametrize
from ..abstract import AbstractColumnTestBed, column_parametrize
class NumPyTensorColumnTestBed(AbstractColumnTestBed):
DEFAULT_CONFIG = {
"num_dims": [1, 2, 3],
"dim_length": [1, 5],
"dtype": ["float", "int"],
"mmap": [True, False],
}
marks = pytest.mark.numpy_col
def __init__(
self,
length: int = 16,
        num_dims: int = 1,
dim_length: int = 5,
dtype="float",
mmap: bool = False,
seed: int = 123,
tmpdir: str = None,
):
self.dtype = dtype
np.random.seed(seed)
array = (
np.random.random((length, *[dim_length for _ in range(num_dims - 1)])) * 10
)
array = array.astype(dtype)
if mmap:
mmap = open_memmap(
filename=os.path.join(tmpdir, "mmap"),
dtype=array.dtype,
shape=array.shape,
mode="w+",
)
mmap[:] = array
self.col = NumPyTensorColumn.from_array(mmap)
else:
self.col = NumPyTensorColumn.from_array(array)
self.data = array
def get_map_spec(
self,
batched: bool = True,
materialize: bool = False,
kwarg: int = 0,
salt: int = 1,
):
return {
"fn": lambda x, k=0: x + salt + k,
"expected_result": NumPyTensorColumn.from_array(
self.col.data + salt + kwarg
),
"output_type": NumPyTensorColumn,
}
def get_filter_spec(
self,
batched: bool = True,
materialize: bool = False,
kwarg: int = 0,
salt: int = 1,
):
return {
"fn": lambda x, k=0: x > 3 + k + salt,
"expected_result": self.col[self.col.data > 3 + salt + kwarg],
}
def get_data(self, index, materialize=True):
return self.data[index]
def get_data_to_set(self, data_index):
return np.zeros_like(self.get_data(data_index))
@staticmethod
def assert_data_equal(data1: np.ndarray, data2: np.ndarray):
assert (data1 == data2).all()
@pytest.fixture(**column_parametrize([NumPyTensorColumnTestBed]))
def testbed(request, tmpdir):
testbed_class, config = request.param
return testbed_class(**config, tmpdir=tmpdir)
def test_init_block():
block_view = NumPyBlock(np.zeros((10, 10)))[0]
with pytest.raises(ValueError):
TorchTensorColumn(block_view)
@product_parametrize(params={"batched": [True, False]})
def test_map_return_single_mmap(tmpdir, testbed: AbstractColumnTestBed, batched: bool):
col = testbed.col
map_spec = testbed.get_map_spec(batched=batched)
def func(x):
out = map_spec["fn"](x)
return out
mmap_path = os.path.join(tmpdir, "mmap_path")
result = col.map(
func,
batch_size=4,
mmap=True,
mmap_path=mmap_path,
is_batched_fn=batched,
output_type=map_spec.get("output_type", None),
)
result = NumPyTensorColumn(result)
assert result.is_equal(map_spec["expected_result"])
# FIXME: when we add mmap support back to map reintroduce this test.
# assert isinstance(result.data, np.memmap)
# assert result.data.filename == mmap_path
@product_parametrize(params={"link": [True, False], "mmap": [True, False]})
def test_io_mmap(tmp_path, testbed, link, mmap):
col = testbed.col
path = os.path.join(tmp_path, "test")
col.write(path, link=link)
assert os.path.islink(os.path.join(path, "data.npy")) == (link and col.is_mmap)
new_col = NumPyTensorColumn.read(path, mmap=mmap)
assert isinstance(new_col, NumPyTensorColumn)
assert col.is_equal(new_col)
assert new_col.is_mmap == mmap
def test_from_array():
# Build a dataset from a batch
array = np.random.rand(10, 3, 3)
col = NumPyTensorColumn.from_array(array)
assert (col == array).all()
np_test.assert_equal(len(col), 10)
def test_to_pandas(testbed):
series = testbed.col.to_pandas()
assert isinstance(series, pd.Series)
    if len(testbed.col.shape) == 1:
assert (series.values == testbed.col.data).all()
else:
for idx in range(len(testbed.col)):
assert (series.iloc[idx] == testbed.col[idx]).all()
def test_repr_pandas(testbed):
series = testbed.col.to_pandas()
assert isinstance(series, pd.Series)
def test_ufunc_out():
out = np.zeros(3)
a = NumPyTensorColumn([1, 2, 3])
b = NumPyTensorColumn([1, 2, 3])
result = np.add(a, b, out=out)
assert (result.data == out).all()
def test_ufunc_at():
a = NumPyTensorColumn([1, 2, 3])
result = np.add.at(a, [0, 1, 1], 1)
assert result is None
assert a.is_equal(NumPyTensorColumn([2, 4, 3]))
def test_ufunc_unhandled():
a = NumPyTensorColumn([1, 2, 3])
with pytest.raises(TypeError):
a == "a"
| meerkat-main | tests/meerkat/columns/tensor/test_numpy.py |
| meerkat-main | tests/meerkat/columns/scalar/__init__.py |
import itertools
from typing import Dict
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
import torch
from meerkat import ScalarColumn
from meerkat.dataframe import DataFrame
from tests.utils import product_parametrize
BACKENDS = ["arrow", "pandas"]
@pytest.mark.parametrize(
"data",
[[1, 2, 3], np.asarray([1, 2, 3]), torch.tensor([1, 2, 3]), pd.Series([1, 2, 3])],
)
@pytest.mark.parametrize("backend", BACKENDS)
def test_backend(data, backend: str):
col = ScalarColumn(data, backend=backend)
expected_type = {"arrow": (pa.Array, pa.ChunkedArray), "pandas": pd.Series}[backend]
assert isinstance(col.data, expected_type)
col_data = col.data
if isinstance(col_data, torch.Tensor):
col_data = col_data.numpy()
if isinstance(data, torch.Tensor):
data = data.numpy()
col_data = np.asarray(col_data)
data = np.asarray(data)
assert (col_data == data).all()
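# The constructor dispatch exercised above: `ScalarColumn(data, backend=...)`
# selects the storage class by name, and the test normalizes both sides to
# numpy before comparing so list, ndarray, tensor, and Series inputs are all
# checked uniformly.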
NUMERIC_COLUMNS = [
np.array([1, 4, 6, 8]),
np.array([1, 4, 6, 8], dtype=float),
]
BOOL_COLUMNS = [
np.array([True, True, True]),
np.array([True, False, True]),
np.array([False, False, False]),
]
NAN_COLUMNS = [
np.array([np.nan, 1, 2]),
np.array([1, 4, 3], dtype=float),
np.array([1, np.nan, 3], dtype=float),
np.array([np.nan, np.nan, np.nan]),
]
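# Fixture data: NUMERIC_COLUMNS and BOOL_COLUMNS feed the aggregation tests
# below; NAN_COLUMNS (float arrays with NaNs mixed in) feed test_isna/test_isnull.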
@product_parametrize({"backend": BACKENDS, "data": NUMERIC_COLUMNS + BOOL_COLUMNS})
def test_mean(data: np.ndarray, backend: str):
col = ScalarColumn(data, backend=backend)
assert data.mean() == col.mean()
@product_parametrize({"backend": BACKENDS, "data": NUMERIC_COLUMNS + BOOL_COLUMNS})
def test_mode(data: np.ndarray, backend: str):
col = ScalarColumn(data, backend=backend)
assert np.all(pd.Series(data).mode().values == col.mode().to_numpy())
@product_parametrize({"backend": BACKENDS, "data": NUMERIC_COLUMNS})
def test_median(data: np.ndarray, backend: str):
col = ScalarColumn(data, backend=backend)
if backend == "arrow":
with pytest.warns(UserWarning):
assert np.median(data) == col.median()
else:
assert np.median(data) == col.median()
@product_parametrize({"backend": BACKENDS, "data": NUMERIC_COLUMNS + BOOL_COLUMNS})
def test_min(data: np.ndarray, backend: str):
col = ScalarColumn(data, backend=backend)
assert np.min(data) == col.min()
@product_parametrize({"backend": BACKENDS, "data": NUMERIC_COLUMNS + BOOL_COLUMNS})
def test_max(data: np.ndarray, backend: str):
col = ScalarColumn(data, backend=backend)
assert np.max(data) == col.max()
@product_parametrize({"backend": BACKENDS, "data": NUMERIC_COLUMNS})
def test_var(data: np.ndarray, backend: str):
col = ScalarColumn(data, backend=backend)
assert np.var(data, ddof=1) == col.var()
@product_parametrize({"backend": BACKENDS, "data": NUMERIC_COLUMNS})
def test_std(data: np.ndarray, backend: str):
col = ScalarColumn(data, backend=backend)
assert np.std(data, ddof=1) == col.std()
@product_parametrize({"backend": BACKENDS, "data": NUMERIC_COLUMNS + BOOL_COLUMNS})
def test_sum(data: np.ndarray, backend: str):
col = ScalarColumn(data, backend=backend)
assert data.sum() == col.sum()
@product_parametrize({"backend": BACKENDS, "data": BOOL_COLUMNS})
def test_any(data: np.ndarray, backend: str):
col = ScalarColumn(data, backend=backend)
assert data.prod() == col.product()
@product_parametrize({"backend": BACKENDS, "data": BOOL_COLUMNS})
def test_all(data: np.ndarray, backend: str):
col = ScalarColumn(data, backend=backend)
assert data.prod() == col.product()
NUMERIC_COLUMN_OPERANDS = [{"a": col, "b": col + 1} for col in NUMERIC_COLUMNS]
NUMERIC_SCALAR_OPERANDS = [{"a": col, "b": col[0].item()} for col in NUMERIC_COLUMNS]
BOOL_COLUMN_OPERANDS = [
{"a": col_a, "b": col_b} for col_a, col_b in itertools.combinations(BOOL_COLUMNS, 2)
]
BOOL_SCALAR_OPERANDS = [
{"a": col_a, "b": col_b[0].item()}
for col_a, col_b in itertools.combinations(BOOL_COLUMNS, 2)
]
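# Operand fixtures: each dict pairs a left operand "a" (always a column-sized
# array) with a right operand "b" that is either another array (column-vs-column
# ops) or a python scalar (column-vs-scalar ops).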
@product_parametrize({"backend": BACKENDS, "operands": NUMERIC_COLUMN_OPERANDS})
def test_add_column(backend: str, operands: Dict[str, np.array]):
col_a = ScalarColumn(operands["a"], backend=backend)
col_b = ScalarColumn(operands["b"], backend=backend)
out = col_a + col_b
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(operands["a"] + operands["b"], backend=backend))
@product_parametrize(
{"backend": BACKENDS, "operands": NUMERIC_SCALAR_OPERANDS, "right": [True, False]}
)
def test_add_scalar(backend: str, operands: Dict[str, np.array], right: bool):
col_a = ScalarColumn(operands["a"], backend=backend)
if right:
out = operands["b"] + col_a
else:
out = col_a + operands["b"]
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(operands["a"] + operands["b"], backend=backend))
@product_parametrize({"backend": BACKENDS, "operands": NUMERIC_COLUMN_OPERANDS})
def test_sub_column(backend: str, operands: Dict[str, np.array]):
col_a = ScalarColumn(operands["a"], backend=backend)
col_b = ScalarColumn(operands["b"], backend=backend)
out = col_a - col_b
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(operands["a"] - operands["b"], backend=backend))
@product_parametrize(
{"backend": BACKENDS, "operands": NUMERIC_SCALAR_OPERANDS, "right": [True, False]}
)
def test_sub_scalar(backend: str, operands: Dict[str, np.array], right: bool):
col_a = ScalarColumn(operands["a"], backend=backend)
if right:
out = operands["b"] - col_a
correct = operands["b"] - operands["a"]
else:
out = col_a - operands["b"]
correct = operands["a"] - operands["b"]
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(correct, backend=backend))
@product_parametrize({"backend": BACKENDS, "operands": NUMERIC_COLUMN_OPERANDS})
def test_mul_column(backend: str, operands: Dict[str, np.array]):
col_a = ScalarColumn(operands["a"], backend=backend)
col_b = ScalarColumn(operands["b"], backend=backend)
out = col_a * col_b
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(operands["a"] * operands["b"], backend=backend))
@product_parametrize(
{"backend": BACKENDS, "operands": NUMERIC_SCALAR_OPERANDS, "right": [True, False]}
)
def test_mul_scalar(backend: str, operands: Dict[str, np.array], right: bool):
col_a = ScalarColumn(operands["a"], backend=backend)
if right:
out = operands["b"] * col_a
correct = operands["b"] * operands["a"]
else:
out = col_a * operands["b"]
correct = operands["a"] * operands["b"]
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(correct, backend=backend))
@product_parametrize({"backend": BACKENDS, "operands": NUMERIC_COLUMN_OPERANDS})
def test_truediv_column(backend: str, operands: Dict[str, np.array]):
col_a = ScalarColumn(operands["a"], backend=backend)
col_b = ScalarColumn(operands["b"], backend=backend)
out = col_a / col_b
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(operands["a"] / operands["b"], backend=backend))
@product_parametrize(
{"backend": BACKENDS, "operands": NUMERIC_SCALAR_OPERANDS, "right": [True, False]}
)
def test_truediv_scalar(backend: str, operands: Dict[str, np.array], right: bool):
col_a = ScalarColumn(operands["a"], backend=backend)
if right:
out = operands["b"] / col_a
correct = operands["b"] / operands["a"]
else:
out = col_a / operands["b"]
correct = operands["a"] / operands["b"]
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(correct, backend=backend))
@product_parametrize({"backend": BACKENDS, "operands": NUMERIC_COLUMN_OPERANDS})
def test_floordiv_column(backend: str, operands: Dict[str, np.array]):
col_a = ScalarColumn(operands["a"], backend=backend)
col_b = ScalarColumn(operands["b"], backend=backend)
out = col_a // col_b
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(operands["a"] // operands["b"], backend=backend))
@product_parametrize(
{"backend": BACKENDS, "operands": NUMERIC_SCALAR_OPERANDS, "right": [True, False]}
)
def test_floordiv_scalar(backend: str, operands: Dict[str, np.array], right: bool):
col_a = ScalarColumn(operands["a"], backend=backend)
if right:
out = operands["b"] // col_a
correct = operands["b"] // operands["a"]
else:
out = col_a // operands["b"]
correct = operands["a"] // operands["b"]
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(correct, backend=backend))
@product_parametrize({"backend": ["pandas"], "operands": NUMERIC_COLUMN_OPERANDS})
def test_mod_column(backend: str, operands: Dict[str, np.array]):
col_a = ScalarColumn(operands["a"], backend=backend)
col_b = ScalarColumn(operands["b"], backend=backend)
out = col_a % col_b
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(operands["a"] % operands["b"], backend=backend))
@product_parametrize(
{"backend": ["pandas"], "operands": NUMERIC_SCALAR_OPERANDS, "right": [True, False]}
)
def test_mod_scalar(backend: str, operands: Dict[str, np.array], right: bool):
col_a = ScalarColumn(operands["a"], backend=backend)
if right:
out = operands["b"] % col_a
correct = operands["b"] % operands["a"]
else:
out = col_a % operands["b"]
correct = operands["a"] % operands["b"]
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(correct, backend=backend))
@product_parametrize({"backend": BACKENDS, "operands": NUMERIC_COLUMN_OPERANDS})
def test_pow_column(backend: str, operands: Dict[str, np.array]):
col_a = ScalarColumn(operands["a"], backend=backend)
col_b = ScalarColumn(operands["b"], backend=backend)
out = col_a**col_b
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(operands["a"] ** operands["b"], backend=backend))
@product_parametrize(
{"backend": BACKENDS, "operands": NUMERIC_SCALAR_OPERANDS, "right": [True, False]}
)
def test_pow_scalar(backend: str, operands: Dict[str, np.array], right: bool):
col_a = ScalarColumn(operands["a"], backend=backend)
if right:
out = operands["b"] ** col_a
correct = operands["b"] ** operands["a"]
else:
out = col_a ** operands["b"]
correct = operands["a"] ** operands["b"]
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(correct, backend=backend))
@product_parametrize({"backend": BACKENDS, "operands": NUMERIC_COLUMN_OPERANDS})
def test_eq_column(backend: str, operands: Dict[str, np.array]):
col_a = ScalarColumn(operands["a"], backend=backend)
col_b = ScalarColumn(operands["b"], backend=backend)
out = col_a == col_b
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(operands["a"] == operands["b"], backend=backend))
@product_parametrize(
{"backend": BACKENDS, "operands": NUMERIC_SCALAR_OPERANDS, "right": [True, False]}
)
def test_eq_scalar(backend: str, operands: Dict[str, np.array], right: bool):
col_a = ScalarColumn(operands["a"], backend=backend)
if right:
out = operands["b"] == col_a
correct = operands["b"] == operands["a"]
else:
out = col_a == operands["b"]
correct = operands["a"] == operands["b"]
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(correct, backend=backend))
@product_parametrize({"backend": BACKENDS, "operands": NUMERIC_COLUMN_OPERANDS})
def test_gt_column(backend: str, operands: Dict[str, np.array]):
col_a = ScalarColumn(operands["a"], backend=backend)
col_b = ScalarColumn(operands["b"], backend=backend)
out = col_a > col_b
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(operands["a"] > operands["b"], backend=backend))
@product_parametrize(
{"backend": BACKENDS, "operands": NUMERIC_SCALAR_OPERANDS, "right": [True, False]}
)
def test_gt_scalar(backend: str, operands: Dict[str, np.array], right: bool):
col_a = ScalarColumn(operands["a"], backend=backend)
if right:
out = operands["b"] > col_a
correct = operands["b"] > operands["a"]
else:
out = col_a > operands["b"]
correct = operands["a"] > operands["b"]
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(correct, backend=backend))
@product_parametrize({"backend": BACKENDS, "operands": NUMERIC_COLUMN_OPERANDS})
def test_lt_column(backend: str, operands: Dict[str, np.array]):
col_a = ScalarColumn(operands["a"], backend=backend)
col_b = ScalarColumn(operands["b"], backend=backend)
out = col_a < col_b
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(operands["a"] < operands["b"], backend=backend))
@product_parametrize(
{"backend": BACKENDS, "operands": NUMERIC_SCALAR_OPERANDS, "right": [True, False]}
)
def test_lt_scalar(backend: str, operands: Dict[str, np.array], right: bool):
col_a = ScalarColumn(operands["a"], backend=backend)
if right:
out = operands["b"] < col_a
correct = operands["b"] < operands["a"]
else:
out = col_a < operands["b"]
correct = operands["a"] < operands["b"]
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(correct, backend=backend))
@product_parametrize({"backend": BACKENDS, "operands": BOOL_COLUMN_OPERANDS})
def test_and_column(backend: str, operands: Dict[str, np.array]):
col_a = ScalarColumn(operands["a"], backend=backend)
col_b = ScalarColumn(operands["b"], backend=backend)
out = col_a & col_b
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(operands["a"] & operands["b"], backend=backend))
@product_parametrize({"backend": BACKENDS, "operands": BOOL_COLUMN_OPERANDS})
def test_and_scalar(backend: str, operands: Dict[str, np.array]):
col_a = ScalarColumn(operands["a"], backend=backend)
out = col_a & operands["b"]
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(operands["a"] & operands["b"], backend=backend))
@product_parametrize({"backend": BACKENDS, "operands": BOOL_COLUMN_OPERANDS})
def test_or_column(backend: str, operands: Dict[str, np.array]):
col_a = ScalarColumn(operands["a"], backend=backend)
col_b = ScalarColumn(operands["b"], backend=backend)
out = col_a | col_b
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(operands["a"] | operands["b"], backend=backend))
@product_parametrize({"backend": BACKENDS, "operands": BOOL_COLUMN_OPERANDS})
def test_or_scalar(backend: str, operands: Dict[str, np.array]):
col_a = ScalarColumn(operands["a"], backend=backend)
out = col_a | operands["b"]
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(operands["a"] | operands["b"], backend=backend))
@product_parametrize({"backend": BACKENDS, "operands": BOOL_COLUMN_OPERANDS})
def test_xor_column(backend: str, operands: Dict[str, np.array]):
col_a = ScalarColumn(operands["a"], backend=backend)
col_b = ScalarColumn(operands["b"], backend=backend)
out = col_a ^ col_b
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(operands["a"] ^ operands["b"], backend=backend))
@product_parametrize({"backend": BACKENDS, "operands": BOOL_COLUMN_OPERANDS})
def test_xor_scalar(backend: str, operands: Dict[str, np.array]):
col_a = ScalarColumn(operands["a"], backend=backend)
out = col_a ^ operands["b"]
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(operands["a"] ^ operands["b"], backend=backend))
@product_parametrize({"backend": BACKENDS, "operands": BOOL_COLUMN_OPERANDS})
def test_invert_column(backend: str, operands: Dict[str, np.array]):
col_a = ScalarColumn(operands["a"], backend=backend)
out = ~col_a
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(~operands["a"], backend=backend))
@product_parametrize({"backend": BACKENDS, "operands": NUMERIC_COLUMN_OPERANDS})
def test_isin_column(backend: str, operands: Dict[str, np.array]):
col_a = ScalarColumn(operands["a"], backend=backend)
values = [operands["b"][0], operands["b"][1], operands["b"][2]]
out = col_a.isin(values)
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(np.isin(operands["a"], values), backend=backend))
@product_parametrize({"backend": BACKENDS, "column": NAN_COLUMNS})
def test_isna(backend: str, column: np.array):
col = ScalarColumn(column, backend=backend)
out = col.isna()
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(np.isnan(column), backend=backend))
@product_parametrize({"backend": BACKENDS, "column": NAN_COLUMNS})
def test_isnull(backend: str, column: np.array):
col = ScalarColumn(column, backend=backend)
out = col.isnull()
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(np.isnan(column), backend=backend))
STRING_COLUMNS = [
pd.Series(
[
"a asdsd ",
"bfdidf.",
"c asdasd dsd",
"1290_dij",
"d",
" efdsdf ",
"Fasdd asdasd",
"pppqqqqq",
"1290_dijaaa",
"hl2dirf83WIW",
"22222",
"1290_disdj",
]
),
]
@product_parametrize(
{
"backend": BACKENDS,
"series": STRING_COLUMNS,
"compute_fn": [
"capitalize",
"isalnum",
"isalpha",
"isdecimal",
"isdigit",
"islower",
"isnumeric",
"isspace",
"istitle",
"isupper",
"lower",
"upper",
"len",
"lower",
"swapcase",
"title",
"strip",
"lstrip",
"rstrip",
],
}
)
def test_unary_str_methods(backend: str, series: pd.Series, compute_fn: str):
col = ScalarColumn(series, backend=backend)
out = getattr(col.str, compute_fn)()
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(getattr(series.str, compute_fn)(), backend=backend))
# @product_parametrize(
# {
# "backend": BACKENDS,
# "series": STRING_COLUMNS,
# "compute_fn": [
# ]
# }
# )
# def test_pattern_str_methods(backend: str, series: pd.Series, compute_fn: str):
# col = ScalarColumn(series, backend=backend)
# out = getattr(col.str, compute_fn)()
# assert isinstance(out, ScalarColumn)
# assert out.equals(
# ScalarColumn(getattr(series.str, compute_fn)(), backend=backend)
# )
@product_parametrize(
{
"backend": BACKENDS,
"series": STRING_COLUMNS,
}
)
def test_center(backend: str, series: pd.Series):
col = ScalarColumn(series, backend=backend)
out = col.str.center(20)
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(series.str.center(20), backend=backend))
@product_parametrize({"backend": BACKENDS, "series": STRING_COLUMNS, "n": [-1, 1, 2]})
def test_split(backend: str, series: pd.Series, n: int):
col = ScalarColumn(series, backend=backend)
out = col.str.split(" ", n=n)
assert isinstance(out, DataFrame)
correct_df = DataFrame(
{
str(name): ScalarColumn(col, backend=backend)
for name, col in series.str.split(" ", n=n, expand=True).items()
}
)
assert correct_df.columns == out.columns
for name in correct_df.columns:
assert correct_df[name].equals(out[name])
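# As with pandas' `str.split(..., expand=True)`, split returns a DataFrame with
# one column per token position, keyed by position. A minimal sketch (assuming
# default behavior): ScalarColumn(["a b", "c d"]).str.split(" ") yields columns
# "0" and "1".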
@product_parametrize({"backend": BACKENDS, "series": STRING_COLUMNS, "n": [-1, 1, 2]})
def test_rsplit(backend: str, series: pd.Series, n: int):
col = ScalarColumn(series, backend=backend)
out = col.str.rsplit(" ", n=n)
assert isinstance(out, DataFrame)
correct_df = DataFrame(
{
str(name): ScalarColumn(col, backend=backend)
for name, col in series.str.rsplit(" ", n=n, expand=True).items()
}
)
assert correct_df.columns == out.columns
for name in correct_df.columns:
assert correct_df[name].equals(out[name])
@product_parametrize(
{
"backend": BACKENDS,
"series": STRING_COLUMNS,
}
)
def test_startswith(backend: str, series: pd.Series):
col = ScalarColumn(series, backend=backend)
out = col.str.startswith("1290")
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(series.str.startswith("1290"), backend=backend))
@product_parametrize(
{
"backend": BACKENDS,
"series": STRING_COLUMNS,
}
)
def test_replace(backend: str, series: pd.Series):
col = ScalarColumn(series, backend=backend)
out = col.str.replace("di", "do")
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(series.str.replace("di", "do"), backend=backend))
@product_parametrize(
{
"backend": BACKENDS,
"series": STRING_COLUMNS,
}
)
def test_contains(backend: str, series: pd.Series):
col = ScalarColumn(series, backend=backend)
out = col.str.contains("di")
assert isinstance(out, ScalarColumn)
assert out.equals(ScalarColumn(series.str.contains("di"), backend=backend))
@product_parametrize(
{
"backend": BACKENDS,
"series": STRING_COLUMNS,
"pat": ["[0-9]+", "(?P<group1>[0-9])(?P<group2>2)"],
}
)
def test_extract(backend: str, pat: str, series: pd.Series):
col = ScalarColumn(series, backend=backend)
contains_groups = True
try:
correct_df = series.str.extract(pat, expand=True)
except ValueError:
contains_groups = False
if contains_groups:
out = col.str.extract(pat)
assert isinstance(out, DataFrame)
for name in correct_df.columns:
assert out[name].equals(ScalarColumn(correct_df[name], backend=backend))
else:
with pytest.raises(ValueError):
out = col.str.extract(pat)
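# Mirroring pandas, `str.extract` returns a DataFrame with one column per
# capture group (the group name if present, a positional name otherwise), and a
# pattern with no capture groups raises ValueError -- which is why the test
# probes pandas first to decide which branch to assert.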
| meerkat-main | tests/meerkat/columns/scalar/test_scalar_column.py |
"""Unittests for NumpyColumn."""
import numpy as np
import pandas as pd
import pytest
import torch
from meerkat import ScalarColumn
from meerkat.block.torch_block import TorchBlock
from ..abstract import AbstractColumnTestBed, column_parametrize
class PandasScalarColumnTestBed(AbstractColumnTestBed):
DEFAULT_CONFIG = {
"contiguous_index": [True, False],
"dtype": ["float", "int", "str"],
}
marks = pytest.mark.pandas_col
def __init__(
self,
length: int = 16,
dtype="float",
contiguous_index: bool = True,
seed: int = 123,
tmpdir: str = None,
):
self.dtype = dtype
np.random.seed(seed)
array = np.random.random(length) * 10
series = pd.Series(array).astype(dtype)
if not contiguous_index:
series.index = np.arange(1, 1 + 2 * length, 2)
self.col = ScalarColumn(series)
self.data = series
def get_map_spec(
self,
batched: bool = True,
materialize: bool = False,
salt: int = 1,
kwarg: int = 0,
):
salt = salt if self.dtype != "str" else str(salt)
kwarg = kwarg if self.dtype != "str" else str(kwarg)
return {
"fn": lambda x, k=0: x + salt + (k if self.dtype != "str" else str(k)),
"expected_result": ScalarColumn(self.col.data + salt + kwarg),
"output_type": ScalarColumn,
}
def get_filter_spec(
self,
batched: bool = True,
materialize: bool = False,
salt: int = 1,
kwarg: int = 0,
):
salt = 3 + salt if self.dtype != "str" else str(3 + salt)
kwarg = kwarg if self.dtype != "str" else str(kwarg)
return {
"fn": lambda x, k=0: x > salt + (k if self.dtype != "str" else str(k)),
"expected_result": self.col[self.col.data > salt + kwarg],
}
def get_data(self, index, materialize: bool = True):
return self.data.iloc[index]
def get_data_to_set(self, data_index):
if isinstance(data_index, int):
return 0
return pd.Series(np.zeros_like(self.get_data(data_index).values))
@staticmethod
    def assert_data_equal(data1: pd.Series, data2: pd.Series):
if isinstance(data1, pd.Series):
assert (data1.values == data2.values).all()
else:
assert data1 == data2
@pytest.fixture(**column_parametrize([PandasScalarColumnTestBed]))
def testbed(request, tmpdir):
testbed_class, config = request.param
return testbed_class(**config, tmpdir=tmpdir)
def test_dt_accessor():
col = ScalarColumn(
data=[f"01/{idx+1}/2001" for idx in range(16)],
)
col = pd.to_datetime(col)
day_col = col.dt.day
assert isinstance(day_col, ScalarColumn)
assert (day_col.values == np.arange(16) + 1).all()
def test_cat_accessor():
categories = ["a", "b", "c", "d"]
col = ScalarColumn(data=categories * 4)
col = col.astype("category")
assert (np.array(categories) == col.cat.categories.values).all()
def test_init_block():
block_view = TorchBlock(torch.zeros(10, 10))[0]
with pytest.raises(ValueError):
ScalarColumn(block_view)
def test_to_tensor(testbed):
col, _ = testbed.col, testbed.data
if testbed.dtype == "str":
with pytest.raises(ValueError):
col.to_tensor()
else:
tensor = col.to_tensor()
assert torch.is_tensor(tensor)
assert (col == tensor.numpy()).all()
def test_to_pandas(testbed):
col, _ = testbed.col, testbed.data
series = col.to_pandas()
assert isinstance(series, pd.Series)
assert (col.data.values == series.values).all()
def test_repr_pandas(testbed):
series = testbed.col.to_pandas()
assert isinstance(series, pd.Series)
def test_ufunc_out():
out = np.zeros(3)
a = ScalarColumn([1, 2, 3])
b = ScalarColumn([1, 2, 3])
np.add(a, b, out=out)
assert (out == np.array([2, 4, 6])).all()
| meerkat-main | tests/meerkat/columns/scalar/test_pandas.py |
"""Unittests for NumpyColumn."""
from typing import Union
import numpy as np
import pandas as pd
import pyarrow as pa
import pytest
import torch
from meerkat import ArrowScalarColumn
from meerkat.block.torch_block import TorchBlock
from ..abstract import AbstractColumnTestBed, column_parametrize
def to_numpy(array: Union[pa.Array, pa.ChunkedArray]):
"""For non-chunked arrays, need to pass zero_copy_only=False."""
if isinstance(array, pa.ChunkedArray):
return array.to_numpy()
return array.to_numpy(zero_copy_only=False)
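# pyarrow's Array.to_numpy defaults to zero_copy_only=True, which raises for
# types that cannot be viewed without a copy (e.g. string arrays), while
# ChunkedArray.to_numpy always copies -- so only the non-chunked path needs the
# flag.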
class ArrowScalarColumnTestBed(AbstractColumnTestBed):
DEFAULT_CONFIG = {
"dtype": ["float", "int", "str"],
}
marks = pytest.mark.arrow_col
def __init__(
self,
length: int = 16,
dtype="float",
seed: int = 123,
tmpdir: str = None,
):
self.dtype = dtype
np.random.seed(seed)
array = np.random.random(length) * 10
if dtype == "float":
array = pa.array(array, type=pa.float64())
elif dtype == "int":
array = array.astype(int)
array = pa.array(array, type=pa.int64())
elif dtype == "str":
array = pd.Series(array).astype("str")
array = pa.array(array, type=pa.string())
else:
raise ValueError(f"dtype {dtype} not supported.")
self.col = ArrowScalarColumn(array)
self.data = array
def get_map_spec(
self,
batched: bool = True,
materialize: bool = False,
salt: int = 1,
kwarg: int = 0,
):
salt = salt if self.dtype != "str" else str(salt)
kwarg = kwarg if self.dtype != "str" else str(kwarg)
if batched:
return {
"fn": lambda x, k=0: pa.array(
to_numpy(x.data) + salt + (k if self.dtype != "str" else str(k))
),
"expected_result": ArrowScalarColumn(
to_numpy(self.col.data) + salt + kwarg
),
"output_type": ArrowScalarColumn,
}
else:
return {
"fn": lambda x, k=0: x + salt + (k if self.dtype != "str" else str(k)),
"expected_result": ArrowScalarColumn(
to_numpy(self.col.data) + salt + kwarg
),
"output_type": ArrowScalarColumn,
}
def get_filter_spec(
self,
batched: bool = True,
materialize: bool = False,
salt: int = 1,
kwarg: int = 0,
):
salt = 3 + salt if self.dtype != "str" else str(3 + salt)
kwarg = kwarg if self.dtype != "str" else str(kwarg)
if batched:
return {
"fn": lambda x, k=0: to_numpy(x.data)
> salt + (k if self.dtype != "str" else str(k)),
"expected_result": self.col[to_numpy(self.col.data) > salt + kwarg],
}
else:
return {
"fn": lambda x, k=0: x > salt + (k if self.dtype != "str" else str(k)),
"expected_result": self.col[to_numpy(self.col.data) > salt + kwarg],
}
def get_data(self, index, materialize: bool = True):
if isinstance(index, slice) or isinstance(index, int):
data = self.data[index]
elif index.dtype == bool:
data = self.data.filter(pa.array(index))
else:
data = self.data.take(index)
if isinstance(index, int):
return data.as_py()
return data
def get_data_to_set(self, data_index):
if isinstance(data_index, int):
return 0
return pd.Series(np.zeros(len(self.data)))
@staticmethod
def assert_data_equal(data1: pa.Array, data2: pa.Array):
if isinstance(data1, (pa.Array, pa.ChunkedArray)):
assert (to_numpy(data1) == to_numpy(data2)).all()
else:
assert data1 == data2
@pytest.fixture(**column_parametrize([ArrowScalarColumnTestBed]))
def testbed(request, tmpdir):
testbed_class, config = request.param
return testbed_class(**config, tmpdir=tmpdir)
def test_init_block():
block_view = TorchBlock(torch.zeros(10, 10))[0]
with pytest.raises(ValueError):
ArrowScalarColumn(block_view)
def test_to_numpy(testbed):
col, _ = testbed.col, testbed.data
array = col.to_numpy()
assert isinstance(array, np.ndarray)
assert (col.data.to_numpy() == array).all()
def test_to_tensor(testbed):
col, _ = testbed.col, testbed.data
if testbed.dtype == "str":
with pytest.raises(TypeError):
col.to_tensor()
else:
tensor = col.to_tensor()
assert torch.is_tensor(tensor)
assert (col.data.to_numpy() == tensor.numpy()).all()
def test_to_pandas(testbed):
col, _ = testbed.col, testbed.data
series = col.to_pandas()
assert isinstance(series, pd.Series)
assert (col.data.to_pandas() == series.values).all()
def test_repr_pandas(testbed):
series = testbed.col.to_pandas()
assert isinstance(series, pd.Series)
| meerkat-main | tests/meerkat/columns/scalar/test_arrow.py |
import glob
import os
import tempfile
from pathlib import Path
import pytest
from meerkat import initialize_logging
def test_initialize_logging():
initialize_logging()
@pytest.fixture
def unreadable_dir(tmpdir):
unread_dir = tmpdir / "unreadable"
os.makedirs(unread_dir)
unread_dir.chmod(0)
if os.access(str(unread_dir), os.R_OK):
# Docker container or similar
pytest.skip("File was still readable")
yield unread_dir
unread_dir.chmod(0o755)
def test_initialize_logging_permission_denied(monkeypatch, unreadable_dir):
def mock_no_access_dir():
return unreadable_dir
monkeypatch.setattr(Path, "home", mock_no_access_dir)
monkeypatch.setattr(tempfile, "gettempdir", mock_no_access_dir)
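    # with both Path.home and tempfile.gettempdir patched to the unreadable
    # directory, every default candidate for the log dir fails, forcing the
    # PermissionError path below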
with pytest.raises(
PermissionError,
match="Permission denied in all of Meerkat's default logging directories. "
"Set environment variable `MEERKAT_LOG_DIR` to specify a directory for "
"Meerkat logging.",
):
initialize_logging()
def test_initialize_logging_environment_variable(monkeypatch, tmpdir):
monkeypatch.setattr(
os, "environ", {"MEERKAT_LOG_DIR": os.path.join(tmpdir, "env_dir")}
)
initialize_logging()
out = list(glob.glob(str(tmpdir) + "/**/meerkat.log", recursive=True))
assert len(out) != 0
def test_initialize_logging_arg(tmpdir):
initialize_logging(log_dir=os.path.join(tmpdir, "env_dir"))
out = list(glob.glob(str(tmpdir) + "/**/meerkat.log", recursive=True))
assert len(out) != 0
| meerkat-main | tests/meerkat/logging/test_utils.py |
import pytest
import torch
from meerkat import TorchTensorColumn
from meerkat.block.abstract import BlockView
from meerkat.block.ref import BlockRef
from meerkat.block.torch_block import TorchBlock
from meerkat.errors import ConsolidationError
def test_signature_hash():
# check equal
block1 = TorchBlock(torch.zeros((100, 10)))
block2 = TorchBlock(torch.ones((100, 10)))
assert hash(block1.signature) == hash(block2.signature)
# check differing type
block1 = TorchBlock(torch.zeros((100, 10), dtype=int))
block2 = TorchBlock(torch.ones((100, 10), dtype=float))
assert hash(block1.signature) != hash(block2.signature)
# check differing column width okay
block1 = TorchBlock(torch.zeros((100, 13), dtype=int))
block2 = TorchBlock(torch.ones((100, 10), dtype=int))
assert hash(block1.signature) == hash(block2.signature)
# check differing column width okay
block1 = TorchBlock(torch.zeros((100, 13, 15), dtype=int))
block2 = TorchBlock(torch.ones((100, 10, 15), dtype=int))
assert hash(block1.signature) == hash(block2.signature)
# check differing later dimensions not okay
block1 = TorchBlock(torch.zeros((100, 10, 15), dtype=int))
block2 = TorchBlock(torch.ones((100, 10, 20), dtype=int))
assert hash(block1.signature) != hash(block2.signature)
    # check differing devices not okay (note: both tensors here end up on CPU,
    # so in practice it is the differing trailing dimension that changes the hash)
block1 = TorchBlock(torch.zeros((100, 10, 15), dtype=int))
block2 = TorchBlock(torch.ones((100, 10, 20), dtype=int).cpu())
assert hash(block1.signature) != hash(block2.signature)
# check differing nrows not okay
block1 = TorchBlock(torch.zeros((90, 10, 15), dtype=int))
block2 = TorchBlock(torch.ones((100, 10, 20), dtype=int))
assert hash(block1.signature) != hash(block2.signature)
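# Taken together, the checks above pin down the signature: it hashes dtype, the
# number of rows, and the trailing (non-concatenation) dimensions -- and, per
# the comments, the device -- while deliberately ignoring the width of the
# column axis so blocks of different widths can still be consolidated.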
@pytest.mark.parametrize("num_blocks", [1, 2, 3])
def test_consolidate_1(num_blocks):
# check equal
blocks = [
TorchBlock(torch.stack([torch.arange(8)] * 12)) for _ in range(num_blocks)
]
slices = [
[0, slice(2, 5, 1)],
[6, slice(2, 7, 2)],
[slice(2, 7, 3), slice(1, 8, 1)],
]
cols = [
{
str(slc): TorchTensorColumn(
data=BlockView(
block=blocks[block_idx],
block_index=slc,
)
)
for slc in slices[block_idx]
}
for block_idx in range(num_blocks)
]
block_refs = [
BlockRef(block=block, columns=cols) for block, cols in zip(blocks, cols)
]
block_ref = TorchBlock.consolidate(block_refs=block_refs)
for ref in block_refs:
block = ref.block
for name, col in ref.items():
assert (
block.data[:, col._block_index]
== block_ref.block.data[:, block_ref[name]._block_index]
).all()
def test_consolidate_empty():
with pytest.raises(ConsolidationError):
TorchBlock.consolidate([])
def test_consolidate_mismatched_signature():
data = torch.stack([torch.arange(8)] * 12)
blocks = [TorchBlock(data.to(int)), TorchBlock(data.to(float))]
slices = [
[0, slice(2, 5, 1)],
[6, slice(2, 7, 2)],
]
cols = [
{
str(slc): TorchTensorColumn(
data=BlockView(
block=blocks[block_idx],
block_index=slc,
)
)
for slc in slices[block_idx]
}
for block_idx in range(2)
]
block_refs = [
BlockRef(block=block, columns=cols) for block, cols in zip(blocks, cols)
]
with pytest.raises(ConsolidationError):
TorchBlock.consolidate(block_refs)
def test_io(tmpdir):
torch.manual_seed(123)
block = TorchBlock(torch.randn(100, 10))
block.write(tmpdir)
new_block = TorchBlock.read(tmpdir)
assert isinstance(block, TorchBlock)
assert (block.data == new_block.data).all()
| meerkat-main | tests/meerkat/block/test_tensor_block.py |
import numpy as np
import pyarrow as pa
import pytest
from meerkat.block.abstract import BlockView
from meerkat.block.arrow_block import ArrowBlock
from meerkat.block.ref import BlockRef
from meerkat.columns.scalar.arrow import ArrowScalarColumn
from meerkat.errors import ConsolidationError
def test_signature_hash():
# check equal
block1 = ArrowBlock(pa.Table.from_pydict({"a": [1, 2, 3], "b": ["4", "5", "6"]}))
block2 = ArrowBlock(pa.Table.from_pydict({"c": [1, 2, 3], "d": ["4", "5", "6"]}))
assert hash(block1.signature) == hash(block2.signature)
# check not equal
block1 = ArrowBlock(pa.Table.from_pydict({"a": [1, 2, 3], "b": ["4", "5", "6"]}))
block2 = ArrowBlock(pa.Table.from_pydict({"c": [1, 2], "d": ["5", "6"]}))
assert hash(block1.signature) != hash(block2.signature)
@pytest.mark.parametrize("num_blocks", [1, 2, 3])
def test_consolidate_1(num_blocks):
# check equal
blocks = [
ArrowBlock(
pa.Table.from_pydict(
{f"a_{idx}": np.arange(10), f"b_{idx}": np.arange(10) * 2},
)
)
for idx in range(num_blocks)
]
cols = [
{
str(slc): ArrowScalarColumn(
data=BlockView(
block=blocks[idx],
block_index=slc,
)
)
for slc in [f"a_{idx}", f"b_{idx}"]
}
for idx in range(num_blocks)
]
block_refs = [
BlockRef(block=block, columns=cols) for block, cols in zip(blocks, cols)
]
block_ref = ArrowBlock.consolidate(block_refs=block_refs)
for ref in block_refs:
block = ref.block
for name, col in ref.items():
assert block.data[col._block_index].equals(
block_ref.block.data[block_ref[name]._block_index]
)
def test_consolidate_empty():
with pytest.raises(ConsolidationError):
ArrowBlock.consolidate([])
def test_consolidate_mismatched_signature():
block1 = ArrowBlock(pa.Table.from_pydict({"a": [1, 2, 3], "b": ["4", "5", "6"]}))
block2 = ArrowBlock(pa.Table.from_pydict({"c": [1, 2], "d": ["5", "6"]}))
blocks = [block1, block2]
slices = [
["a", "b"],
["c", "d"],
]
cols = [
{
str(slc): ArrowScalarColumn(
data=BlockView(
block=blocks[block_idx],
block_index=slc,
)
)
for slc in slices[block_idx]
}
for block_idx in range(2)
]
block_refs = [
BlockRef(block=block, columns=cols) for block, cols in zip(blocks, cols)
]
with pytest.raises(ConsolidationError):
ArrowBlock.consolidate(block_refs)
def test_io(tmpdir):
block = ArrowBlock(pa.Table.from_pydict({"a": [1, 2, 3], "b": ["4", "5", "6"]}))
block.write(tmpdir)
new_block = block.read(tmpdir)
assert isinstance(block, ArrowBlock)
assert block.data.equals(new_block.data)
| meerkat-main | tests/meerkat/block/test_arrow_block.py |
import numpy as np
import pytest
from meerkat import NumPyTensorColumn
from meerkat.block.abstract import BlockView
from meerkat.block.numpy_block import NumPyBlock
from meerkat.block.ref import BlockRef
from meerkat.errors import ConsolidationError
def test_signature_hash():
# check equal
block1 = NumPyBlock(np.zeros((100, 10)))
block2 = NumPyBlock(np.ones((100, 10)))
assert hash(block1.signature) == hash(block2.signature)
# check differing type
block1 = NumPyBlock(np.zeros((100, 10), dtype=int))
block2 = NumPyBlock(np.ones((100, 10), dtype=float))
assert hash(block1.signature) != hash(block2.signature)
# check differing column width okay
block1 = NumPyBlock(np.zeros((100, 13), dtype=int))
block2 = NumPyBlock(np.ones((100, 10), dtype=int))
assert hash(block1.signature) == hash(block2.signature)
# check differing column width okay
block1 = NumPyBlock(np.zeros((100, 13, 15), dtype=int))
block2 = NumPyBlock(np.ones((100, 10, 15), dtype=int))
assert hash(block1.signature) == hash(block2.signature)
# check differing later dimensions not okay
block1 = NumPyBlock(np.zeros((100, 10, 15), dtype=int))
block2 = NumPyBlock(np.ones((100, 10, 20), dtype=int))
assert hash(block1.signature) != hash(block2.signature)
# check differing nrows not okay
block1 = NumPyBlock(np.zeros((90, 10, 15), dtype=int))
block2 = NumPyBlock(np.ones((100, 10, 20), dtype=int))
assert hash(block1.signature) != hash(block2.signature)
@pytest.mark.parametrize("num_blocks", [1, 2, 3])
def test_consolidate_1(num_blocks):
# check equal
data = np.stack([np.arange(8)] * 12)
blocks = [NumPyBlock(data.copy()) for _ in range(num_blocks)]
slices = [
[0, slice(2, 5, 1)],
[6, slice(2, 7, 2)],
[slice(2, 7, 3), slice(1, 8, 1)],
]
cols = [
{
str(slc): NumPyTensorColumn(
data=BlockView(
block=blocks[block_idx],
block_index=slc,
)
)
for slc in slices[block_idx]
}
for block_idx in range(num_blocks)
]
block_refs = [
BlockRef(block=block, columns=cols) for block, cols in zip(blocks, cols)
]
block_ref = NumPyBlock.consolidate(block_refs=block_refs)
for ref in block_refs:
block = ref.block
for name, col in ref.items():
assert (
block.data[:, col._block_index]
== block_ref.block.data[:, block_ref[name]._block_index]
).all()
def test_consolidate_empty():
with pytest.raises(ConsolidationError):
NumPyBlock.consolidate([])
def test_consolidate_mismatched_signature():
data = np.stack([np.arange(8)] * 12)
blocks = [NumPyBlock(data.astype(int)), NumPyBlock(data.astype(float))]
slices = [
[0, slice(2, 5, 1)],
[6, slice(2, 7, 2)],
]
cols = [
{
str(slc): NumPyTensorColumn(
data=BlockView(
block=blocks[block_idx],
block_index=slc,
)
)
for slc in slices[block_idx]
}
for block_idx in range(2)
]
block_refs = [
BlockRef(block=block, columns=cols) for block, cols in zip(blocks, cols)
]
with pytest.raises(ConsolidationError):
NumPyBlock.consolidate(block_refs)
def test_io(tmpdir):
np.random.seed(123)
block = NumPyBlock(np.random.randn(100, 10))
block.write(tmpdir)
new_block = NumPyBlock.read(tmpdir)
assert isinstance(block, NumPyBlock)
assert (block.data == new_block.data).all()
| meerkat-main | tests/meerkat/block/test_numpy_block.py |
| meerkat-main | tests/meerkat/block/__init__.py |
import numpy as np
import pandas as pd
import pytest
from meerkat import ScalarColumn
from meerkat.block.abstract import BlockView
from meerkat.block.pandas_block import PandasBlock
from meerkat.block.ref import BlockRef
from meerkat.errors import ConsolidationError
def test_signature_hash():
# check equal
block1 = PandasBlock(pd.DataFrame({"a": [1, 2, 3], "b": ["4", "5", "6"]}))
block2 = PandasBlock(pd.DataFrame({"c": [1, 2, 3], "d": ["4", "5", "6"]}))
assert hash(block1.signature) == hash(block2.signature)
# check equal
block1 = PandasBlock(pd.DataFrame({"a": [1, 2, 3], "b": ["4", "5", "6"]}))
block2 = PandasBlock(pd.DataFrame({"c": [1, 2], "d": ["5", "6"]}))
assert hash(block1.signature) != hash(block2.signature)
@pytest.mark.parametrize("num_blocks", [1, 2, 3])
def test_consolidate_1(num_blocks):
# check equal
blocks = [
PandasBlock(
pd.DataFrame(
{f"a_{idx}": np.arange(10), f"b_{idx}": np.arange(10) * 2},
index=np.arange(idx, idx + 10), # need to test with different
)
)
for idx in range(num_blocks)
]
cols = [
{
str(slc): ScalarColumn(
data=BlockView(
block=blocks[idx],
block_index=slc,
)
)
for slc in [f"a_{idx}", f"b_{idx}"]
}
for idx in range(num_blocks)
]
block_refs = [
BlockRef(block=block, columns=cols) for block, cols in zip(blocks, cols)
]
block_ref = PandasBlock.consolidate(block_refs=block_refs)
for ref in block_refs:
block = ref.block
for name, col in ref.items():
assert (
block.data[col._block_index].reset_index(drop=True)
== block_ref.block.data[block_ref[name]._block_index]
).all()
def test_consolidate_empty():
with pytest.raises(ConsolidationError):
PandasBlock.consolidate([])
def test_consolidate_mismatched_signature():
block1 = PandasBlock(pd.DataFrame({"a": [1, 2, 3], "b": ["4", "5", "6"]}))
block2 = PandasBlock(pd.DataFrame({"c": [1, 2], "d": ["5", "6"]}))
blocks = [block1, block2]
slices = [
["a", "b"],
["c", "d"],
]
cols = [
{
str(slc): ScalarColumn(
data=BlockView(
block=blocks[block_idx],
block_index=slc,
)
)
for slc in slices[block_idx]
}
for block_idx in range(2)
]
block_refs = [
BlockRef(block=block, columns=cols) for block, cols in zip(blocks, cols)
]
with pytest.raises(ConsolidationError):
PandasBlock.consolidate(block_refs)
def test_io(tmpdir):
block = PandasBlock(pd.DataFrame({"a": [1, 2, 3], "b": ["4", "5", "6"]}))
block.write(tmpdir)
new_block = block.read(tmpdir)
assert isinstance(block, PandasBlock)
assert block.data.equals(new_block.data)
# test with non-contiguous index, which is not supported by feather
block = PandasBlock(
pd.DataFrame({"a": [1, 2, 3], "b": ["4", "5", "6"]}, index=np.arange(1, 4))
)
block.write(tmpdir)
new_block = block.read(tmpdir)
assert isinstance(block, PandasBlock)
assert block.data.reset_index(drop=True).equals(new_block.data)
| meerkat-main | tests/meerkat/block/test_pandas_block.py |
import numpy as np
from meerkat import DeferredColumn, TensorColumn
from meerkat.block.deferred_block import DeferredBlock, DeferredOp
from meerkat.block.ref import BlockRef
from ...utils import product_parametrize
def fn(x: int) -> int:
return x + 1, x + 2, x + 3
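# `fn` returns a 3-tuple, so a DeferredOp over it has three return indices;
# `with_return_index(i)` selects which element of the tuple a given column
# materializes.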
@product_parametrize(params={"num_blocks": [1, 2, 3]})
def test_consolidate(num_blocks: int):
inp = TensorColumn(np.arange(8))
op = DeferredOp(args=[inp], fn=fn, kwargs={}, is_batched_fn=False, batch_size=1)
block_views = [
        DeferredBlock.from_column_data(data=op.with_return_index(i))
        for i in range(num_blocks)
]
cols = [
{str(block_view.block_index): DeferredColumn(data=block_view)}
for block_view in block_views
]
block_ref = DeferredBlock.consolidate(
block_refs=[
BlockRef(
block=block_view.block,
columns=col,
)
for block_view, col in zip(block_views, cols)
]
)
assert isinstance(block_ref, BlockRef)
for name, col in block_ref.items():
assert col._block is block_ref.block
assert int(name) == col._block_index
for i in range(num_blocks):
assert (block_ref[str(i)]().data == cols[i][str(i)]().data).all()
def test_consolidate_same_index():
inp = TensorColumn(np.arange(8))
op = DeferredOp(args=[inp], fn=fn, kwargs={}, is_batched_fn=False, batch_size=1)
block_views = [
DeferredBlock.from_column_data(data=op.with_return_index(0)),
DeferredBlock.from_column_data(data=op.with_return_index(0)),
DeferredBlock.from_column_data(data=op.with_return_index(1)),
]
cols = [
{str(i): DeferredColumn(data=block_view)}
for i, block_view in enumerate(block_views)
]
block_ref = DeferredBlock.consolidate(
block_refs=[
BlockRef(
block=block_view.block,
columns=col,
)
for block_view, col in zip(block_views, cols)
]
)
assert isinstance(block_ref, BlockRef)
for _, col in block_ref.items():
assert col._block is block_ref.block
for i in range(len(block_views)):
assert (block_ref[str(i)]().data == cols[i][str(i)]().data).all()
| meerkat-main | tests/meerkat/block/test_deferred_block.py |
import os
from itertools import product
import numpy as np
import pytest
import torch
import meerkat as mk
from meerkat.block.manager import BlockManager
from meerkat.tools.utils import load_yaml
from ...utils import product_parametrize
def test_consolidate_no_op():
mgr = BlockManager()
col1 = mk.TensorColumn(data=np.arange(10))
mgr.add_column(col1, "a")
col2 = mk.TensorColumn(np.arange(10) * 2)
mgr.add_column(col2, "b")
col2 = mk.TensorColumn(np.arange(10, dtype=float) * 2)
mgr.add_column(col2, "c")
block_ref = mgr.get_block_ref("c")
assert len(mgr._block_refs) == 3
mgr.consolidate()
assert len(mgr._block_refs) == 2
# assert that the block_ref hasn't changed for the isolated block ref
assert mgr.get_block_ref("c") is block_ref
def test_consolidate():
mgr = BlockManager()
col1 = mk.TensorColumn(data=np.arange(10))
mgr.add_column(col1, "col1")
col2 = mk.TensorColumn(np.arange(10) * 2)
mgr.add_column(col2, "col2")
col3 = mk.ScalarColumn(np.arange(10) * 3)
mgr.add_column(col3, "col3")
col4 = mk.ScalarColumn(np.arange(10) * 4)
mgr.add_column(col4, "col4")
col5 = mk.TensorColumn(torch.arange(10) * 5)
mgr.add_column(col5, "col5")
col6 = mk.TensorColumn(torch.arange(10) * 6)
mgr.add_column(col6, "col6")
col9 = mk.TensorColumn(torch.ones(10, 5).to(int) * 9)
mgr.add_column(col9, "col9")
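    # seven columns arrive on seven separate blocks; consolidation should leave
    # one block per signature: numpy int (col1, col2), pandas (col3, col4), and
    # torch int (col5, col6, col9)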
assert len(mgr._block_refs) == 7
mgr.consolidate()
assert len(mgr._block_refs) == 3
# check that the same object backs both the block and the column
for name, col in [("col1", col1), ("col2", col2)]:
assert mgr[name].data.base is mgr.get_block_ref(name).block.data
assert (mgr[name] == col).all()
# check that the same object backs both the block and the column
for name, col in [("col3", col3), ("col4", col4)]:
assert mgr[name].data is mgr.get_block_ref(name).block.data[name]
assert (mgr[name] == col).all()
    # check that the same object backs both the block and the column
for name, col in [("col5", col5), ("col6", col6)]:
# TODO (sabri): Figure out a way to check this for tensors
assert (mgr[name] == col).all()
def test_consolidate_multiple_types():
mgr = BlockManager()
for dtype in [int, float]:
for idx in range(3):
col = mk.TensorColumn(np.arange(10, dtype=dtype))
mgr.add_column(col, f"col{idx}_{dtype}")
mgr.add_column(mk.ScalarColumn(np.arange(10) * 4), "col4_pandas")
mgr.add_column(mk.ScalarColumn(np.arange(10) * 5), "col5_pandas")
assert len(mgr._block_refs) == 8
mgr.consolidate()
assert len(mgr._block_refs) == 3
def test_consolidate_preserves_order():
mgr = BlockManager()
col1 = mk.TensorColumn(data=np.arange(10))
mgr.add_column(col1, "col1")
col2 = mk.TensorColumn(np.arange(10) * 2)
mgr.add_column(col2, "col2")
col3 = mk.ScalarColumn(np.arange(10) * 3)
mgr.add_column(col3, "col3")
order = ["col2", "col3", "col1"]
mgr.reorder(order)
assert list(mgr.keys()) == order
mgr.consolidate()
assert list(mgr.keys()) == order
@pytest.mark.parametrize(
"num_blocks, consolidated",
product([1, 2, 3], [True, False]),
)
def test_apply_get_multiple(num_blocks, consolidated):
mgr = BlockManager()
for dtype in [int, float]:
for idx in range(num_blocks):
col = mk.TensorColumn(np.arange(10, dtype=dtype) * idx)
mgr.add_column(col, f"col{idx}_{dtype}")
if consolidated:
mgr.consolidate()
for slc in [
slice(2, 6, 1),
slice(0, 1, 1),
slice(2, 8, 3),
np.array([1, 4, 6]),
np.array([True, False] * 5),
]:
new_mgr = mgr.apply(method_name="_get", index=slc)
assert isinstance(new_mgr, BlockManager)
for dtype in [int, float]:
for idx in range(num_blocks):
# check it's equivalent to applying the slice to each column in turn
assert (
new_mgr[f"col{idx}_{dtype}"].data
== mgr[f"col{idx}_{dtype}"][slc].data
).all()
# check that the base is the same (since we're just slicing)
assert (
new_mgr[f"col{idx}_{dtype}"].data.base
is mgr[f"col{idx}_{dtype}"][slc].data.base
) == isinstance(slc, slice)
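# The final assert encodes the copy/view contract: indexing with a python
# `slice` yields views that share the underlying buffer (same `.base`), while
# fancy integer or boolean indexing materializes a copy.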
@pytest.mark.parametrize(
"num_blocks, consolidated",
product([1, 2, 3], [True, False]),
)
def test_apply_get_single(num_blocks, consolidated):
mgr = BlockManager()
for dtype in [int, float]:
for idx in range(num_blocks):
col = mk.TensorColumn(np.arange(10, dtype=dtype) * idx)
mgr.add_column(col, f"col{idx}_{dtype}")
if consolidated:
mgr.consolidate()
for slc in [0, 8]:
result_dict = mgr.apply(method_name="_get", index=slc)
assert isinstance(result_dict, dict)
for dtype in [int, float]:
for idx in range(num_blocks):
# check it's equivalent to applying the slice to each column in turn
assert result_dict[f"col{idx}_{dtype}"] == mgr[f"col{idx}_{dtype}"][slc]
@pytest.fixture()
def call_count(monkeypatch):
from meerkat import TensorColumn
from meerkat.block.numpy_block import NumPyBlock
calls = {"count": 0}
block_get = NumPyBlock._get
def patched_get(self, *args, **kwargs):
nonlocal calls
calls["count"] += 1
return block_get(self, *args, **kwargs)
monkeypatch.setattr(NumPyBlock, "_get", patched_get)
col_get = TensorColumn._get
def patched_get_col(self, *args, **kwargs):
nonlocal calls
calls["count"] += 1
return col_get(self, *args, **kwargs)
monkeypatch.setattr(TensorColumn, "_get", patched_get_col)
return calls
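# The fixture patches both NumPyBlock._get and TensorColumn._get with a shared
# counter so the tests below can assert that indexing a consolidated manager
# hits the block-level getter exactly once, rather than once per column.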
@product_parametrize({"consolidated": [True, False]})
def test_apply_get_single_lambda(call_count, consolidated):
mgr = BlockManager()
base_col = mk.TensorColumn(np.arange(10))
mgr.add_column(base_col, "a")
# lambda_column = base_col.defer(lambda x: x + 2)
# mgr.add_column(lambda_column, "b")
if consolidated:
mgr.consolidate()
mgr.apply(method_name="_get", index=1, materialize=True)
# we should only call NumpyBlock._get once
assert call_count["count"] == 1
@product_parametrize({"consolidated": [True, False]})
def test_apply_get_multiple_lambda(call_count, consolidated):
mgr = BlockManager()
base_col = mk.TensorColumn(np.arange(10))
mgr.add_column(base_col, "a")
lambda_column = base_col.defer(lambda x: x + 2)
mgr.add_column(lambda_column, "b")
if consolidated:
mgr.consolidate()
new_mgr = mgr.apply(method_name="_get", index=[1, 3, 5], materialize=False)
# the columns should stil be linked after an index
assert new_mgr["b"].data.args[0] is new_mgr["a"]
# we should only call NumpyBlock._get once
assert call_count["count"] == 1
@pytest.mark.parametrize(
"consolidated",
[True, False],
)
def test_remove(consolidated):
mgr = BlockManager()
col = mk.TensorColumn(np.arange(10))
mgr.add_column(col, "a")
col = mk.TensorColumn(np.arange(10))
mgr.add_column(col, "b")
if consolidated:
mgr.consolidate()
assert len(mgr) == 2
assert len(mgr._block_refs) == 1 if consolidated else 2
mgr.remove("a")
assert len(mgr) == 1
assert list(mgr.keys()) == ["b"]
assert len(mgr._block_refs) == 1
with pytest.raises(
expected_exception=ValueError,
match="Remove failed: no column 'c' in BlockManager.",
):
mgr.remove("c")
def test_getitem():
mgr = BlockManager()
a = mk.TensorColumn(np.arange(10))
mgr.add_column(a, "a")
b = mk.TensorColumn(np.arange(10))
mgr.add_column(b, "b")
    # check that the manager holds a reference to the original column and
    # returns that same object on repeated access
assert mgr["a"] is a
assert mgr["a"] is mgr["a"]
with pytest.raises(
ValueError,
match="Unsupported index of type `<class 'int'>` passed to `BlockManager`.",
):
mgr[0]
out = mgr[["a", "b"]]
assert isinstance(out, BlockManager)
    # check that the selection shares the original column's underlying data and
    # returns the same column object
assert mgr["a"].data.base is out["a"].data.base
assert mgr["a"] is out["a"]
assert out["a"] is out["a"]
with pytest.raises(ValueError, match="`BlockManager` does not contain column 'c'."):
mgr[["a", "c"]]
def test_setitem():
mgr = BlockManager()
a = mk.TensorColumn(np.arange(10))
mgr["a"] = a
b = mk.TensorColumn(np.arange(10)) * 2
mgr["b"] = b
    # check that the manager holds a reference to the original column
    # and returns that same object (a coreference)
assert mgr["a"] is a
assert mgr["a"] is mgr["a"]
with pytest.raises(
ValueError,
match="Cannot set item with object of type `<class 'int'>` on `BlockManager`.",
):
mgr["a"] = 1
def test_contains():
mgr = BlockManager()
col = mk.TensorColumn(np.arange(10))
mgr.add_column(col, "a")
col = mk.TensorColumn(np.arange(10))
mgr.add_column(col, "b")
assert "a" in mgr
assert "b" in mgr
assert "c" not in mgr
@pytest.mark.parametrize(
"num_blocks, consolidated",
product([1, 2, 3], [True, False]),
)
def test_len(num_blocks, consolidated):
mgr = BlockManager()
for dtype in [int, float]:
for idx in range(num_blocks):
col = mk.TensorColumn(np.arange(10, dtype=dtype) * idx)
mgr.add_column(col, f"col{idx}_{dtype}")
if consolidated:
mgr.consolidate()
assert len(mgr) == num_blocks * 2
def test_io(tmpdir):
tmpdir = os.path.join(tmpdir, "test")
mgr = BlockManager()
col1 = mk.TensorColumn(data=np.arange(10))
mgr.add_column(col1, "col1")
col2 = mk.TensorColumn(np.arange(10) * 2)
mgr.add_column(col2, "col2")
col3 = mk.ScalarColumn(np.arange(10) * 3)
mgr.add_column(col3, "col3")
col4 = mk.ScalarColumn(np.arange(10) * 4)
mgr.add_column(col4, "col4")
col5 = mk.TensorColumn(torch.arange(10) * 5)
mgr.add_column(col5, "col5")
col6 = mk.TensorColumn(torch.arange(10) * 6)
mgr.add_column(col6, "col6")
col7 = mk.ObjectColumn(list(range(10)))
mgr.add_column(col7, "col7")
col8 = mk.ObjectColumn(list(range(10)))
mgr.add_column(col8, "col8")
col9 = mk.TensorColumn(torch.ones(10, 5).to(int) * 9)
mgr.add_column(col9, "col9")
assert len(mgr._block_refs) == 7
mgr.write(tmpdir)
new_mgr = BlockManager.read(tmpdir)
assert len(new_mgr._block_refs) == 3
for idx in range(1, 7):
assert (mgr[f"col{idx}"] == new_mgr[f"col{idx}"]).all()
    for idx in range(7, 9):  # object columns (col7, col8)
assert mgr[f"col{idx}"].data == new_mgr[f"col{idx}"].data
# test overwriting
col1 = mk.TensorColumn(data=np.arange(10) * 100)
mgr.add_column(col1, "col1")
mgr.remove("col9")
assert "col9" in load_yaml(os.path.join(tmpdir, "meta.yaml"))["columns"]
mgr.write(tmpdir)
# make sure the old column was removed
assert "col9" not in load_yaml(os.path.join(tmpdir, "meta.yaml"))["columns"]
new_mgr = BlockManager.read(tmpdir)
assert len(new_mgr._block_refs) == 3
for idx in range(1, 7):
assert (mgr[f"col{idx}"] == new_mgr[f"col{idx}"]).all()
    for idx in range(7, 9):  # object columns (col7, col8)
assert mgr[f"col{idx}"].data == new_mgr[f"col{idx}"].data
def test_io_no_overwrite(tmpdir):
new_dir = os.path.join(tmpdir, "test")
os.mkdir(new_dir)
mgr = BlockManager()
with pytest.raises(
IsADirectoryError,
match=f"Cannot write `BlockManager`. {new_dir} is a directory.",
):
mgr.write(new_dir)
@product_parametrize(
{
"column_type": [
mk.TensorColumn,
mk.PandasScalarColumn,
mk.ArrowScalarColumn,
mk.TensorColumn,
],
"column_order": [("z", "a"), ("a", "z")],
}
)
def test_io_lambda_args(tmpdir, column_type, column_order):
mgr = BlockManager()
base_col_name, col_name = column_order
base_col = column_type(np.arange(16))
mgr.add_column(base_col, base_col_name) # want to order backwards
lambda_column = base_col.defer(lambda x: x + 2)
mgr.add_column(lambda_column, col_name)
mgr.write(os.path.join(tmpdir, "test"))
new_mgr = BlockManager.read(os.path.join(tmpdir, "test"))
# ensure that in the loaded df, the lambda column points to the same
# underlying data as the base column
assert new_mgr[col_name].data.args[0] is new_mgr[base_col_name]
    # ensure that the base column was not written twice
# check that dir is empty
block_id = mgr._column_to_block_id[col_name]
assert not os.listdir(
os.path.join(tmpdir, "test", f"blocks/{block_id}", "data.op/args")
)
assert not os.listdir(
os.path.join(tmpdir, "test", f"blocks/{block_id}", "data.op/kwargs")
)
@product_parametrize(
{
"column_type": [
mk.NumPyTensorColumn,
mk.PandasScalarColumn,
mk.ArrowScalarColumn,
mk.TorchTensorColumn,
]
}
)
def test_io_chained_lambda_args(tmpdir, column_type):
mgr = BlockManager()
base_col = column_type(np.arange(16))
mgr.add_column(base_col, "a")
lambda_column = base_col.defer(lambda x: x + 2)
mgr.add_column(lambda_column, "b")
second_lambda_column = lambda_column.defer(lambda x: x + 2)
mgr.add_column(second_lambda_column, "c")
mgr.write(os.path.join(tmpdir, "test"))
new_mgr = BlockManager.read(os.path.join(tmpdir, "test"))
# ensure that in the loaded df, the lambda column points to the same
# underlying data as the base column
# TODO: this should work once we get topological sort correct
assert new_mgr["c"].data.args[0] is new_mgr["b"]
    # ensure that the base column was not written twice
# check that dir is empty
block_id = mgr._column_to_block_id["c"]
assert not os.listdir(
os.path.join(tmpdir, "test", f"blocks/{block_id}", "data.op/args")
)
assert not os.listdir(
os.path.join(tmpdir, "test", f"blocks/{block_id}", "data.op/kwargs")
)
def test_topological_block_refs():
mgr = BlockManager()
base_col = mk.TensorColumn(np.arange(16))
lambda_columns = []
expected_order = [id(base_col._block)]
curr_col = base_col
for _ in range(10):
curr_col = curr_col.defer(lambda x: x + 2)
expected_order.append(id(curr_col._block))
lambda_columns.append(curr_col)
# add to manager in reversed order
for i, col in enumerate(lambda_columns[::-1]):
mgr.add_column(col, f"lambda_{i}")
mgr.add_column(base_col, "base")
sorted_block_refs = list(list(zip(*mgr.topological_block_refs()))[0])
assert sorted_block_refs == expected_order
def test_topological_block_refs_w_gap():
mgr = BlockManager()
base_col = mk.TensorColumn(np.arange(16))
lambda_columns = []
curr_col = base_col
for _ in range(10):
curr_col = curr_col.defer(lambda x: x + 2)
lambda_columns.append(curr_col)
mgr.add_column(lambda_columns[0], "first")
mgr.add_column(lambda_columns[-2], "second_to_last")
mgr.add_column(lambda_columns[-1], "last")
mgr.add_column(base_col, "base")
expected_order = [
id(base_col._block),
id(lambda_columns[0]._block),
id(lambda_columns[-2]._block),
id(lambda_columns[-1]._block),
]
sorted_block_refs = list(list(zip(*mgr.topological_block_refs()))[0])
    # because there is a gap, we cannot guarantee the global order of the
    # blocks; we may want to support this at some point, but we don't need
    # to for now
assert sorted_block_refs.index(id(base_col._block)) < sorted_block_refs.index(
id(lambda_columns[0]._block)
)
assert sorted_block_refs.index(
id(lambda_columns[-2]._block)
) < sorted_block_refs.index(id(lambda_columns[-1]._block))
assert len(sorted_block_refs) == len(expected_order)
| meerkat-main | tests/meerkat/block/test_manager.py |
"""Functions to generate certain RST files."""
# import math
import inspect
import os
import pathlib
from collections import defaultdict
from typing import List, Union
# import numpy as np
import pandas as pd
import meerkat as mk
_DIR = pathlib.Path(os.path.dirname(os.path.abspath(__file__)))
def _replace_contents_in_file(fpath, *, key: str, contents: Union[str, List[str]]):
"""Replace contents of a file that are between the delimiters.
Delimiters should be Markdown style comments formatted with the key.
For example:
<!---autogen-start: my-key-->
<!---autogen-end: my-key-->
"""
if isinstance(contents, str):
contents = [contents]
contents = [line + "\n" if not line.endswith("\n") else line for line in contents]
if not contents[-1].endswith("\n"):
contents += ["\n"]
start_delimiter = f"<!---autogen-start: {key}-->"
end_delimiter = f"<!---autogen-end: {key}-->"
# Read in the file
with open(fpath, "r") as file:
lines = file.readlines()
start_indices = [
idx for idx, line in enumerate(lines) if line.strip() == start_delimiter
]
end_indices = [
idx for idx, line in enumerate(lines) if line.strip() == end_delimiter
]
if len(start_indices) != len(end_indices):
raise ValueError(f"Number of start and end delimiters do not match in {fpath}.")
if len(start_indices) == 0:
raise ValueError(f"No start and end delimiters found in {fpath}.")
# Replace the content between the delimiters.
brackets = lines[: start_indices[0] + 1] + contents
for end_idx, next_start_idx in zip(end_indices, start_indices[1:]):
brackets.extend(lines[end_idx : next_start_idx + 1])
brackets.extend(contents)
brackets.extend(lines[end_indices[-1] :])
# Write the file out again
with open(fpath, "w") as file:
file.writelines(brackets)
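# Hypothetical usage sketch: given a markdown file `notes.md` containing
#   <!---autogen-start: my-key-->
#   (stale content)
#   <!---autogen-end: my-key-->
# the call below swaps in fresh content between the markers and leaves the
# rest of the file untouched:
# _replace_contents_in_file("notes.md", key="my-key", contents=["fresh row"])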
def generate_inbuilt_reactive_fns():
"""Generate the inbuilt reactive functions RST file."""
fpath = _DIR / "user-guide" / "interactive" / "reactive-functions" / "inbuilts.rst"
_REACTIVE_FNS = {
"General": [
"all",
"any",
"bool",
"complex",
"float",
"hex",
"int",
"len",
"oct",
"str",
"list",
"tuple",
"sum",
"dict",
"set",
"range",
"abs",
],
"Boolean Operations": ["cand", "cnot", "cor"],
"DataFrame Operations": [
"concat",
"merge",
"sort",
"sample",
"shuffle",
"groupby",
"clusterby",
"aggregate",
"explainby",
],
}
lines = [
".. _reactivity_inbuilts:",
"",
"Reactive Functions in Meerkat",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
"Meerkat provides some reactive functions out of the box.",
]
# Add the reactive functions to the lines.
for category, fns in _REACTIVE_FNS.items():
fns = sorted(fns)
lines.append("")
lines.append(category)
lines.append("-" * len(category))
lines.append("")
lines.append(".. autosummary::")
lines.append(" :toctree: ../../apidocs/generated")
lines.append(" :nosignatures:")
lines.append("")
for fn in fns:
assert hasattr(mk, fn), f"mk.{fn} is not a function in Meerkat."
mk_fn = getattr(mk, fn)
assert (
hasattr(mk_fn, "__wrapper__") and mk_fn.__wrapper__ == "reactive"
), f"mk.{fn} is not a reactive function."
lines.append(f" meerkat.{fn}")
with open(fpath, "w") as f:
for line in lines:
f.write(line + "\n")
def generate_store_operators():
"""Generate table of store operators that are reactive."""
fpath = _DIR / "user-guide" / "interactive" / "stores" / "inbuilts.md"
operators = [
        # Arithmetic
"Addition, +, x + y",
"Subtraction, -, x - y",
"Multiplication, *, x * y",
"Division, /, x / y",
"Floor Division, //, x // y",
"Modulo, %, x % y",
"Exponentiation, **, x ** y",
# Assignment
"Add & Assign, +=, x+=1",
"Subtract & Assign, -=, x-=1",
"Multiply & Assign, *=, x*=1",
"Divide & Assign, /=, x/=1",
"Floor Divide & Assign, //=, x//=1",
"Modulo & Assign, %=, x%=1",
"Exponentiate & Assign, **=, x**=1",
"Power & Assign, **=, x**=1",
"Bitwise Left Shift & Assign, <<=, x<<=1",
"Bitwise Right Shift & Assign, >>=, x>>=1",
"Bitwise AND & Assign, &=, x&=1",
"Bitwise XOR & Assign, ^=, x^=1",
"Bitwise OR & Assign, \|=, x\|=1",
# Bitwise
"Bitwise Left Shift, <<, x << y",
"Bitwise Right Shift, >>, x >> y",
"Bitwise AND, &, x & y",
"Bitwise XOR, ^, x ^ y",
"Bitwise OR, \|, x \| y",
"Bitwise Inversion, ~, ~x",
# Comparison
"Less Than, <, x < y",
"Less Than or Equal, <=, x <= y",
"Equal, ==, x == y",
"Not Equal, !=, x != y",
"Greater Than, >, x > y",
"Greater Than or Equal, >=, x >= y",
# Get item
"Get Item, [key], x[0]",
"Get Slice, [start:stop], x[0:10]",
]
content = defaultdict(list)
for operator in operators:
name, symbol, example = [x.strip() for x in operator.split(",")]
content["name"].append(name)
content["symbol"].append(symbol)
content["example"].append(example)
df = pd.DataFrame(content)
content_markdown = df.to_markdown(index=False)
_replace_contents_in_file(
fpath, key="mk-store-reactive-operators", contents=content_markdown
)
def generate_common_inplace_methods():
"""Generate table of common inplace methods."""
fpath = _DIR / "advanced" / "magic-contexts" / "limitations.md"
# Examples of common inplace methods include:
methods = [
# list
"list.append",
"list.extend",
"list.insert",
"list.remove",
"list.pop",
"list.clear",
"list.sort",
"list.reverse",
# dict
"dict.clear",
"dict.pop",
"dict.popitem",
"dict.setdefault",
"dict.update",
# set
"set.add",
"set.clear",
"set.discard",
"set.pop",
"set.remove",
"set.update",
]
methods = sorted(methods)
# num_columns = range(1, 5)
# num_rows = [math.ceil(len(methods) / ncol) for ncol in num_columns]
# remainder = [
# nrow * ncol - len(methods) for nrow, ncol in zip(num_rows, num_columns)
# ]
# min_remainder_loc = np.argmin(remainder)
# ncols = num_columns[min_remainder_loc]
# nrows = num_rows[min_remainder_loc]
# TODO: make a table
methods = ["- {py:meth}" + f"`{method}`" for method in methods]
_replace_contents_in_file(fpath, key="common-inplace-methods", contents=methods)
def generate_components_doc():
"""Generate autosummary doc for all components."""
fpath = _DIR / "user-guide" / "interactive" / "components" / "inbuilts.rst"
component_libs = ["core", "html", "plotly", "flowbite"]
_COMPONENTS = {}
for name in component_libs:
components = []
for klass_name in dir(getattr(mk.gui, name)):
klass = getattr(getattr(mk.gui, name), klass_name)
if inspect.isclass(klass) and issubclass(klass, mk.gui.Component):
components.append(
f"meerkat.interactive.app.src.lib.component.{name}.{klass_name}"
)
components = sorted(components)
_COMPONENTS[name] = components
lines = [
".. _components_inbuilts:",
"",
"Meerkat Components",
"^^^^^^^^^^^^^^^^^^^^^^^^^^^^^",
"Meerkat provides some components out of the box.",
]
# Add the reactive functions to the lines.
for category, components in _COMPONENTS.items():
lines.append("")
lines.append(category)
lines.append("-" * len(category))
lines.append("")
lines.append(".. autosummary::")
lines.append(" :toctree: ../../apidocs/generated")
lines.append(" :nosignatures:")
lines.append("")
for component in components:
lines.append(f" {component}")
os.makedirs(os.path.dirname(fpath), exist_ok=True)
with open(fpath, "w+") as f:
for line in lines:
f.write(line + "\n")
if __name__ == "__main__":
generate_inbuilt_reactive_fns()
generate_store_operators()
generate_common_inplace_methods()
generate_components_doc()
| meerkat-main | docs/source/rst_gen.py |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import subprocess
import sys
from distutils.util import convert_path
main_ns = {}
ver_path = convert_path("../../meerkat/version.py")
with open(ver_path) as ver_file:
exec(ver_file.read(), main_ns)
sys.path.insert(0, os.path.abspath(""))
sys.path.insert(0, os.path.abspath(".."))
sys.path.insert(0, os.path.abspath("../.."))
sys.setrecursionlimit(1500)
# -- Project information -----------------------------------------------------
project = "Meerkat"
copyright = "2023 Meerkat"
author = "The Meerkat Team"
# The full version, including alpha/beta/rc tags
# release = "0.0.0dev"
version = release = main_ns["__version__"]
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.autosummary",
"sphinx.ext.coverage",
"sphinx.ext.napoleon",
"sphinx.ext.viewcode",
"sphinx.ext.todo",
"nbsphinx",
# "recommonmark",
"IPython.sphinxext.ipython_directive",
"IPython.sphinxext.ipython_console_highlighting",
"myst_nb",
"sphinx.ext.intersphinx",
"sphinx_book_theme",
"sphinx_external_toc",
"sphinx_design",
"sphinx_toolbox.collapse",
"sphinx_copybutton",
"sphinx_remove_toctrees",
]
autosummary_generate = True
autosummary_imported_members = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = []
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "sphinx_book_theme"
html_title = f"🔮 v{version}"
html_logo = "../assets/meerkat_banner_padded.svg"
html_favicon = "../assets/meerkat_logo.png"
# html_sidebars = {"**": ["sbt-sidebar-nav.html"]}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ["_static"]
html_css_files = [
"css/custom.css",
]
# From jupyter-book default sphinx config
# https://github.com/executablebooks/jupyter-book/blob/421f6198728b21c94726a10b61776fb4cc097d72/jupyter_book/config.py#L23
html_permalinks_icon = "¶"
html_sourcelink_suffix = ""
numfig = True
panels_add_bootstrap_css = False
# Don't show module names in front of class names.
add_module_names = False
# Intersphinx mappings
intersphinx_mapping = {
"python": ("https://docs.python.org/3", None),
"numpy": ("https://numpy.org/doc/stable", None),
"pandas": ("https://pandas.pydata.org/docs", None),
"pd": ("https://pandas.pydata.org/docs", None),
}
# Set a longer nb execution timeout
nb_execution_timeout = 180
remove_from_toctrees = ["apidocs/generated/*"]
# Sort members by group
autodoc_member_order = "bysource"
# Color Scheme
panels_css_variables = {
"tabs-color-label-active": "rgb(108,72,232)",
"tabs-color-label-inactive": "rgba(108,72,232,0.5)",
}
todo_include_todos = True
source_suffix = {
".rst": "restructuredtext",
".ipynb": "myst-nb",
".myst": "myst-nb",
".md": "myst-nb",
}
external_toc_path = "_toc.yml"
html_theme_options = {
"repository_url": "https://github.com/hazyresearch/meerkat/",
"use_repository_button": True,
"use_issues_button": True,
"use_edit_page_button": True,
"path_to_docs": "doc/source",
"home_page_in_toc": True,
"show_navbar_depth": 2,
"use_sidenotes": True,
"show_toc_level": 2,
"launch_buttons": {
"notebook_interface": "jupyterlab",
"binderhub_url": "https://mybinder.org",
"colab_url": "https://colab.research.google.com",
},
"announcement": "<div class='topnav'></div>",
# "navigation_depth": 3,
}
html_context = {
"display_github": True,
"github_user": "hazyresearch",
"github_repo": "meerkat",
"github_version": "main",
"conf_py_path": "/docs/",
}
def setup(app):
"""Generate the rst files you need."""
    # Only run the generation when we are not in livedocs;
    # running it there causes livedocs to retrigger unnecessarily.
if os.environ.get("SPHINX_LIVEDOCS", "false") != "true":
path = os.path.abspath(os.path.dirname(__file__))
path = os.path.join(path, "rst_gen.py")
subprocess.run(["python", path])
| meerkat-main | docs/source/conf.py |
import os
import meerkat as mk
def display_df(df: mk.DataFrame, name: str):
# need to get absolute paths so this works on readthedocs
base_dir = os.path.join(os.path.dirname(os.path.dirname(mk.__file__)), "docs")
body_html = df._repr_html_()
    with open(os.path.join(base_dir, "source/html/display/dataframe.css"), "r") as f:
        css = f.read()
    body_html = body_html.replace("\n", f"\n <style> {css} </style>", 1)
    with open(os.path.join(base_dir, f"source/html/display/{name}.html"), "w") as f:
        f.write(body_html)
return df
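# Illustrative usage (assumes the docs html/display folders exist):
# >>> display_df(mk.DataFrame({"a": [1, 2, 3]}), name="example")
# which writes `source/html/display/example.html` with the CSS inlined.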
| meerkat-main | docs/source/display.py |
from typing import List
import meerkat as mk
def get_rst_class_ref(klass: type):
return f":class:`dcbench.{klass.__name__}`"
def get_link(text: str, url: str):
return f"`{text} <{url}>`_"
def create_tags_html(tags: List[str]):
tags = "".join([f"<div class='tag'>{tag.replace('_', ' ')}</div>" for tag in tags])
html = f"""
<div class='tags'>
{tags}
</div>
"""
return html
def create_versions_html(versions: List[str]):
versions = "".join(
[f"<div class='tag'>{tag.replace('_', ' ')}</div>" for tag in versions]
)
html = f"""
<div class='versions'>
{versions}
</div>
"""
return html
def get_datasets_table():
df = mk.datasets.catalog
df = df.to_pandas()
df["versions"] = df["name"].apply(
lambda x: create_versions_html(mk.datasets.versions(x))
)
df["homepage"] = df["homepage"].apply(lambda x: f'<a href="{x}">link</a>')
df["tags"] = df["tags"].apply(create_tags_html)
df = df.set_index("name")
df.index.name = None
style = df[["tags", "versions", "homepage"]].style.set_table_styles(
{"description": [{"selector": "", "props": "max-width: 50%;"}]}
)
html = style.to_html(escape=False)
html += """
<style>
.tag {
font-family: monospace;
font-size: 0.8em;
border: 10px;
border-radius: 5px;
border-color: black;
padding-left: 7px;
padding-right: 7px;
padding-top: 0px;
padding-bottom: 0px;
margin: 1px;
}
.tags .tag {
background-color: lightgrey;
}
.versions .tag {
background-color: lightblue;
}
.tags {
display: inline-flex;
flex-wrap: wrap;
}
.versions {
display: inline-flex;
flex-wrap: wrap;
}
</style>
"""
return html
datasets_table = get_datasets_table()
open("source/datasets/datasets_table.html", "w").write(datasets_table)
| meerkat-main | docs/source/datasets/build_datasets_docs.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Evaluation script for object localization
import json
import argparse
import torch
import itertools
import numpy as np
from collections import defaultdict
from utils import bbox_overlaps_batch, get_frm_mask
from stanfordcorenlp import StanfordCoreNLP
from tqdm import tqdm
class ANetGrdEval(object):
def __init__(self, reference_file=None, submission_file=None,
split_file=None, val_split=None, iou_thresh=0.5, verbose=False):
if not reference_file:
raise IOError('Please input a valid reference file!')
if not submission_file:
raise IOError('Please input a valid submission file!')
self.iou_thresh = iou_thresh
self.verbose = verbose
self.val_split = val_split
self.import_ref(reference_file, split_file)
self.import_sub(submission_file)
def import_ref(self, reference_file=None, split_file=None):
with open(split_file) as f:
split_dict = json.load(f)
split = {}
for s in self.val_split:
split.update({i:i for i in split_dict[s]})
with open(reference_file) as f:
ref = json.load(f)['annotations']
ref = {k:v for k,v in ref.items() if k in split}
self.ref = ref
def import_sub(self, submission_file=None):
with open(submission_file) as f:
pred = json.load(f)['results']
self.pred = pred
def gt_grd_eval(self):
ref = self.ref
pred = self.pred
print('Number of videos in the reference: {}, number of videos in the submission: {}'.format(len(ref), len(pred)))
results = defaultdict(list)
for vid, anns in ref.items():
for seg, ann in anns['segments'].items():
if len(ann['frame_ind']) == 0:
continue # annotation not available
ref_bbox_all = torch.cat((torch.Tensor(ann['process_bnd_box']), \
torch.Tensor(ann['frame_ind']).unsqueeze(-1)), dim=1) # 5-D coordinates
sent_idx = set(itertools.chain.from_iterable(ann['process_idx'])) # index of word in sentence to evaluate
for idx in sent_idx:
sel_idx = [ind for ind, i in enumerate(ann['process_idx']) if idx in i]
ref_bbox = ref_bbox_all[sel_idx] # select matched boxes
# Note that despite discouraged, a single word could be annotated across multiple boxes/frames
assert(ref_bbox.size(0) > 0)
class_name = ann['process_clss'][sel_idx[0]][ann['process_idx'][sel_idx[0]].index(idx)]
if vid not in pred:
results[class_name].append(0) # video not grounded
elif seg not in pred[vid]:
results[class_name].append(0) # segment not grounded
elif idx not in pred[vid][seg]['idx_in_sent']:
results[class_name].append(0) # object not grounded
else:
pred_ind = pred[vid][seg]['idx_in_sent'].index(idx)
pred_bbox = torch.cat((torch.Tensor(pred[vid][seg]['bbox_for_all_frames'][pred_ind])[:,:4], \
torch.Tensor(range(10)).unsqueeze(-1)), dim=1)
frm_mask = torch.from_numpy(get_frm_mask(pred_bbox[:, 4].numpy(), \
ref_bbox[:, 4].numpy()).astype('uint8'))
overlap = bbox_overlaps_batch(pred_bbox[:, :5].unsqueeze(0), \
ref_bbox[:, :5].unsqueeze(0), frm_mask.unsqueeze(0))
results[class_name].append(1 if torch.max(overlap) > self.iou_thresh else 0)
print('Number of groundable objects in this split: {}'.format(len(results)))
grd_accu = np.mean([sum(hm)*1./len(hm) for i,hm in results.items()])
print('-' * 80)
print('The overall localization accuracy is {:.4f}'.format(grd_accu))
print('-' * 80)
if self.verbose:
print('Object frequency and grounding accuracy per class (descending by object frequency):')
accu_per_clss = {(i, sum(hm)*1./len(hm)):len(hm) for i,hm in results.items()}
accu_per_clss = sorted(accu_per_clss.items(), key=lambda x:x[1], reverse=True)
for accu in accu_per_clss:
print('{} ({}): {:.4f}'.format(accu[0][0], accu[1], accu[0][1]))
return grd_accu
def precision_recall_util(self, mode='all'):
ref = self.ref
pred = self.pred
print('Number of videos in the reference: {}, number of videos in the submission: {}'.format(len(ref), len(pred)))
nlp = StanfordCoreNLP('tools/stanford-corenlp-full-2018-02-27')
props={'annotators': 'lemma','pipelineLanguage':'en', 'outputFormat':'json'}
vocab_in_split = set()
prec = defaultdict(list)
prec_per_sent = defaultdict(list)
for vid, anns in tqdm(ref.items()):
for seg, ann in anns['segments'].items():
if len(ann['frame_ind']) == 0 or vid not in pred or seg not in pred[vid]:
continue # do not penalize if sentence not annotated
prec_per_sent_tmp = [] # for each sentence
ref_bbox_all = torch.cat((torch.Tensor(ann['process_bnd_box']),
torch.Tensor(ann['frame_ind']).unsqueeze(-1)), dim=1) # 5-D coordinates
idx_in_sent = {}
for box_idx, cls_lst in enumerate(ann['process_clss']):
vocab_in_split.update(set(cls_lst))
for cls_idx, cls in enumerate(cls_lst):
idx_in_sent[cls] = idx_in_sent.get(cls, []) + [ann['process_idx'][box_idx][cls_idx]]
sent_idx = set(itertools.chain.from_iterable(ann['process_idx'])) # index of gt object words
exclude_obj = {json.loads(nlp.annotate(token, properties=props)
)['sentences'][0]['tokens'][0]['lemma']: 1 for token_idx, token in
enumerate(ann['tokens']
) if (token_idx not in sent_idx and token != '')}
for pred_idx, class_name in enumerate(pred[vid][seg]['clss']):
if class_name in idx_in_sent:
gt_idx = min(idx_in_sent[class_name]) # always consider the first match...
sel_idx = [idx for idx, i in enumerate(ann['process_idx']) if gt_idx in i]
ref_bbox = ref_bbox_all[sel_idx] # select matched boxes
assert (ref_bbox.size(0) > 0)
pred_bbox = torch.cat((torch.Tensor(pred[vid][seg]['bbox_for_all_frames'][pred_idx])[:, :4],
torch.Tensor(range(10)).unsqueeze(-1)), dim=1)
frm_mask = torch.from_numpy(get_frm_mask(pred_bbox[:, 4].numpy(),
ref_bbox[:, 4].numpy()).astype('uint8'))
overlap = bbox_overlaps_batch(pred_bbox[:, :5].unsqueeze(0),
ref_bbox[:, :5].unsqueeze(0), frm_mask.unsqueeze(0))
prec[class_name].append(1 if torch.max(overlap) > self.iou_thresh else 0)
prec_per_sent_tmp.append(1 if torch.max(overlap) > self.iou_thresh else 0)
elif json.loads(nlp.annotate(class_name, properties=props))['sentences'][0]['tokens'][0]['lemma'] in exclude_obj:
pass # do not penalize if gt object word not annotated (missed)
else:
if mode == 'all':
prec[class_name].append(0) # hallucinated object
prec_per_sent_tmp.append(0)
prec_per_sent[vid + seg] = prec_per_sent_tmp
nlp.close()
# recall
recall = defaultdict(list)
recall_per_sent = defaultdict(list)
for vid, anns in ref.items():
for seg, ann in anns['segments'].items():
if len(ann['frame_ind']) == 0:
# print('no annotation available')
continue
recall_per_sent_tmp = [] # for each sentence
ref_bbox_all = torch.cat((torch.Tensor(ann['process_bnd_box']), \
torch.Tensor(ann['frame_ind']).unsqueeze(-1)), dim=1) # 5-D coordinates
sent_idx = set(itertools.chain.from_iterable(ann['process_idx'])) # index of gt object words
for gt_idx in sent_idx:
sel_idx = [idx for idx, i in enumerate(ann['process_idx']) if gt_idx in i]
ref_bbox = ref_bbox_all[sel_idx] # select matched boxes
# Note that despite discouraged, a single word could be annotated across multiple boxes/frames
assert(ref_bbox.size(0) > 0)
class_name = ann['process_clss'][sel_idx[0]][ann['process_idx'][sel_idx[0]].index(gt_idx)]
if vid not in pred:
recall[class_name].append(0) # video not grounded
recall_per_sent_tmp.append(0)
elif seg not in pred[vid]:
recall[class_name].append(0) # segment not grounded
recall_per_sent_tmp.append(0)
elif class_name in pred[vid][seg]['clss']:
pred_idx = pred[vid][seg]['clss'].index(class_name) # always consider the first match...
pred_bbox = torch.cat((torch.Tensor(pred[vid][seg]['bbox_for_all_frames'][pred_idx])[:,:4], \
torch.Tensor(range(10)).unsqueeze(-1)), dim=1)
frm_mask = torch.from_numpy(get_frm_mask(pred_bbox[:, 4].numpy(), \
ref_bbox[:, 4].numpy()).astype('uint8'))
overlap = bbox_overlaps_batch(pred_bbox[:, :5].unsqueeze(0), \
ref_bbox[:, :5].unsqueeze(0), frm_mask.unsqueeze(0))
recall[class_name].append(1 if torch.max(overlap) > self.iou_thresh else 0)
recall_per_sent_tmp.append(1 if torch.max(overlap) > self.iou_thresh else 0)
else:
if mode == 'all':
recall[class_name].append(0) # object not grounded
recall_per_sent_tmp.append(0)
recall_per_sent[vid + seg] = recall_per_sent_tmp
return prec, recall, prec_per_sent, recall_per_sent, vocab_in_split
def grd_eval(self, mode='all'):
if mode == 'all':
print('Evaluating on all object words.')
elif mode == 'loc':
print('Evaluating only on correctly-predicted object words.')
else:
raise Exception('Invalid loc mode!')
prec, recall, prec_per_sent, rec_per_sent, vocab_in_split = self.precision_recall_util(mode=mode)
# compute the per-class precision, recall, and F1 scores
num_vocab = len(vocab_in_split)
print('Number of groundable objects in this split: {}'.format(num_vocab))
print('Number of objects in prec and recall: {}, {}'.format(len(prec), len(recall)))
prec_cls = np.sum([sum(hm)*1./len(hm) for i,hm in prec.items()])*1./num_vocab
recall_cls = np.sum([sum(hm)*1./len(hm) for i,hm in recall.items()])*1./num_vocab
f1_cls = 2. * prec_cls * recall_cls / (prec_cls + recall_cls)
print('-' * 80)
print('The overall precision_{0} / recall_{0} / F1_{0} are {1:.4f} / {2:.4f} / {3:.4f}'.format(mode, prec_cls, recall_cls, f1_cls))
print('-' * 80)
if self.verbose:
print('Object frequency and grounding accuracy per class (descending by object frequency):')
accu_per_clss = {}
for i in vocab_in_split:
prec_clss = sum(prec[i])*1./len(prec[i]) if i in prec else 0
recall_clss = sum(recall[i])*1./len(recall[i]) if i in recall else 0
accu_per_clss[(i, prec_clss, recall_clss)] = (len(prec[i]), len(recall[i]))
accu_per_clss = sorted(accu_per_clss.items(), key=lambda x:x[1][1], reverse=True)
for accu in accu_per_clss:
print('{} ({} / {}): {:.4f} / {:.4f}'.format(accu[0][0], accu[1][0], accu[1][1], accu[0][1], accu[0][2]))
# compute the per-sent precision, recall, and F1 scores
num_segment_without_labels = 0
prec, rec, f1 = [], [], []
for seg_id, prec_list in prec_per_sent.items():
if rec_per_sent[seg_id] == []:
# skip the segment if no target objects
num_segment_without_labels += 1
else:
current_prec = 0 if prec_list == [] else np.mean(prec_list) # avoid empty prec_list
current_rec = np.mean(rec_per_sent[seg_id])
# if precision and recall are both 0, set the f1 to be 0
if current_prec == 0.0 and current_rec == 0.0:
current_f1_score = 0.0
else:
current_f1_score = 2. * current_prec * current_rec / (current_prec + current_rec) # per-sent F1
prec.append(current_prec)
rec.append(current_rec)
f1.append(current_f1_score)
num_predictions = 0
for _, pred_seg in self.pred.items():
num_predictions += len(pred_seg)
        # divide the scores by the total number of predictions
avg_prec = np.sum(prec) / (num_predictions - num_segment_without_labels)
avg_rec = np.sum(rec) / (num_predictions - num_segment_without_labels)
avg_f1 = np.sum(f1) / (num_predictions - num_segment_without_labels)
print('-' * 80)
print('The overall precision_{0}_per_sent / recall_{0}_per_sent / F1_{0}_per_sent are {1:.4f} / {2:.4f} / {3:.4f}'.format(mode, avg_prec, avg_rec, avg_f1))
print('-' * 80)
return prec_cls, recall_cls, f1_cls, avg_prec, avg_rec, avg_f1
def main(args):
grd_evaluator = ANetGrdEval(reference_file=args.reference, submission_file=args.submission,
split_file=args.split_file, val_split=args.split,
iou_thresh=args.iou_thresh, verbose=args.verbose)
if args.eval_mode == 'GT':
print('Assuming the input boxes are based upon GT sentences.')
grd_evaluator.gt_grd_eval()
elif args.eval_mode == 'gen':
print('Assuming the input boxes are based upon generated sentences.')
grd_evaluator.grd_eval(mode=args.loc_mode)
else:
raise Exception('Invalid eval mode!')
if __name__=='__main__':
parser = argparse.ArgumentParser(description='ActivityNet-Entities object grounding evaluation script.')
parser.add_argument('-s', '--submission', type=str, default='', help='submission grounding result file')
parser.add_argument('-r', '--reference', type=str, default='data/anet_entities_cleaned_class_thresh50_trainval.json', help='reference file')
parser.add_argument('--split_file', type=str, default='data/split_ids_anet_entities.json', help='path to the split file')
parser.add_argument('--split', type=str, nargs='+', default=['validation'], help='which split(s) to evaluate')
parser.add_argument('--eval_mode', type=str, default='GT',
help='GT | gen, indicating whether the input is on GT sentences or generated sentences')
parser.add_argument('--loc_mode', type=str, default='all',
help='all | loc, when the input is on generate sentences, whether consider language error or not')
parser.add_argument('--iou_thresh', type=float, default=0.5, help='the iou threshold for grounding correctness')
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
main(args)
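# Example invocation (file paths are placeholders):
# python scripts/eval_grd_anet_entities.py \
#     -s results/grounding.json \
#     -r data/anet_entities_cleaned_class_thresh50_trainval.json \
#     --split_file data/split_ids_anet_entities.json \
#     --split validation --eval_mode GT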
| ActivityNet-Entities-main | scripts/eval_grd_anet_entities.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Script to print stats on the NP annotation file
import numpy as np
import json
import csv
import sys
src_file = sys.argv[1] # 'anet_entities.json'
dataset_file = sys.argv[2] # 'anet_captions_all_splits.json'
split_file = sys.argv[3] # 'split_ids_anet_entities.json'
if __name__ == '__main__':
with open(src_file) as f:
data = json.load(f)['database']
with open(dataset_file) as f:
raw_data = json.load(f)
split_dict = {}
with open(split_file) as f:
split = json.load(f)
for s,ids in split.items():
split_dict.update({i:s for i in ids})
num_seg = np.sum([len(dat['segments']) for vid, dat in data.items()])
total_box = {}
total_dur = []
seg_splits = {}
for vid, dat in data.items():
for seg, ann in dat['segments'].items():
total_box[split_dict[vid]] = total_box.get(split_dict[vid], 0)+len(ann['objects'])
total_dur.append(float(raw_data[vid]['timestamps'][int(seg)][1]-raw_data[vid]['timestamps'][int(seg)][0]))
seg_splits[split_dict[vid]] = seg_splits.get(split_dict[vid], 0)+1
print('number of annotated video: {}'.format(len(data)))
print('number of annotated video segments: {}'.format(num_seg))
print('number of segments in each split: {}'.format(seg_splits))
print('total duration in hr: {}'.format(np.sum(total_dur)/3600))
print('total number of noun phrase boxes: {}'.format(total_box))
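# Example invocation (positional args, paths are placeholders):
# python scripts/anet_entities_np_stats.py anet_entities.json \
#     anet_captions_all_splits.json split_ids_anet_entities.json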
| ActivityNet-Entities-main | scripts/anet_entities_np_stats.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Based on
# https://github.com/jiasenlu/NeuralBabyTalk/blob/master/misc/bbox_transform.py
# Licensed under The MIT License
# Copyright (c) 2017 Jiasen Lu
# --------------------------------------------------------
# Reorganized and modified by Jianwei Yang and Jiasen Lu
# --------------------------------------------------------
# --------------------------------------------------------
# Fast R-CNN
# Copyright (c) 2015 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ross Girshick
# --------------------------------------------------------
import torch
import numpy as np
def bbox_overlaps_batch(anchors, gt_boxes, frm_mask=None):
"""
anchors: (N, 4) ndarray of float
gt_boxes: (b, K, 5) ndarray of float
frm_mask: (b, N, K) ndarray of bool
overlaps: (N, K) ndarray of overlap between boxes and query_boxes
"""
batch_size = gt_boxes.size(0)
if anchors.dim() == 2:
assert frm_mask == None, 'mask not implemented yet' # hasn't updated the mask yet
N = anchors.size(0)
K = gt_boxes.size(1)
anchors = anchors.view(1, N, 4).expand(batch_size, N, 4).contiguous()
gt_boxes = gt_boxes[:,:,:4].contiguous()
gt_boxes_x = (gt_boxes[:,:,2] - gt_boxes[:,:,0] + 1)
gt_boxes_y = (gt_boxes[:,:,3] - gt_boxes[:,:,1] + 1)
gt_boxes_area = (gt_boxes_x * gt_boxes_y).view(batch_size, 1, K)
anchors_boxes_x = (anchors[:,:,2] - anchors[:,:,0] + 1)
anchors_boxes_y = (anchors[:,:,3] - anchors[:,:,1] + 1)
anchors_area = (anchors_boxes_x * anchors_boxes_y).view(batch_size, N, 1)
gt_area_zero = (gt_boxes_x == 1) & (gt_boxes_y == 1)
anchors_area_zero = (anchors_boxes_x == 1) & (anchors_boxes_y == 1)
boxes = anchors.view(batch_size, N, 1, 4).expand(batch_size, N, K, 4)
query_boxes = gt_boxes.view(batch_size, 1, K, 4).expand(batch_size, N, K, 4)
iw = (torch.min(boxes[:,:,:,2], query_boxes[:,:,:,2]) -
torch.max(boxes[:,:,:,0], query_boxes[:,:,:,0]) + 1)
iw[iw < 0] = 0
ih = (torch.min(boxes[:,:,:,3], query_boxes[:,:,:,3]) -
torch.max(boxes[:,:,:,1], query_boxes[:,:,:,1]) + 1)
ih[ih < 0] = 0
ua = anchors_area + gt_boxes_area - (iw * ih)
overlaps = iw * ih / ua
# mask the overlap here.
overlaps.masked_fill_(gt_area_zero.view(batch_size, 1, K).expand(batch_size, N, K), 0)
overlaps.masked_fill_(anchors_area_zero.view(batch_size, N, 1).expand(batch_size, N, K), -1)
elif anchors.dim() == 3:
N = anchors.size(1)
K = gt_boxes.size(1)
if anchors.size(2) == 5:
anchors = anchors[:,:,:5].contiguous()
else:
anchors = anchors[:,:,1:6].contiguous()
gt_boxes = gt_boxes[:,:,:5].contiguous()
gt_boxes_x = (gt_boxes[:,:,2] - gt_boxes[:,:,0] + 1)
gt_boxes_y = (gt_boxes[:,:,3] - gt_boxes[:,:,1] + 1)
gt_boxes_area = (gt_boxes_x * gt_boxes_y).view(batch_size, 1, K)
anchors_boxes_x = (anchors[:,:,2] - anchors[:,:,0] + 1)
anchors_boxes_y = (anchors[:,:,3] - anchors[:,:,1] + 1)
anchors_area = (anchors_boxes_x * anchors_boxes_y).view(batch_size, N, 1)
gt_area_zero = (gt_boxes_x == 1) & (gt_boxes_y == 1)
anchors_area_zero = (anchors_boxes_x == 1) & (anchors_boxes_y == 1)
boxes = anchors.view(batch_size, N, 1, 5).expand(batch_size, N, K, 5)
query_boxes = gt_boxes.view(batch_size, 1, K, 5).expand(batch_size, N, K, 5)
iw = (torch.min(boxes[:,:,:,2], query_boxes[:,:,:,2]) -
torch.max(boxes[:,:,:,0], query_boxes[:,:,:,0]) + 1)
iw[iw < 0] = 0
ih = (torch.min(boxes[:,:,:,3], query_boxes[:,:,:,3]) -
torch.max(boxes[:,:,:,1], query_boxes[:,:,:,1]) + 1)
ih[ih < 0] = 0
ua = anchors_area + gt_boxes_area - (iw * ih)
if frm_mask is not None:
# proposal and gt should be on the same frame to overlap
# frm_mask = ~frm_mask # bitwise not (~) does not work with uint8 in pytorch 1.3
frm_mask = 1 - frm_mask
# print('Percentage of proposals that are in the annotated frame: {}'.format(torch.mean(frm_mask.float())))
overlaps = iw * ih / ua
overlaps *= frm_mask.type(overlaps.type())
# mask the overlap here.
overlaps.masked_fill_(gt_area_zero.view(batch_size, 1, K).expand(batch_size, N, K), 0)
overlaps.masked_fill_(anchors_area_zero.view(batch_size, N, 1).expand(batch_size, N, K), -1)
else:
raise ValueError('anchors input dimension is not correct.')
return overlaps
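# Sanity-check sketch for the 2-D anchors branch above: two identical 10x10
# boxes (in inclusive pixel coordinates) should give IoU 1.
# >>> a = torch.tensor([[0., 0., 9., 9.]])        # (N, 4) anchors
# >>> g = torch.tensor([[[0., 0., 9., 9., 0.]]])  # (b, K, 5) gt boxes
# >>> bbox_overlaps_batch(a, g)
# tensor([[[1.]]])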
def get_frm_mask(proposals, gt_bboxs):
# proposals: num_pps
# gt_bboxs: num_box
num_pps = proposals.shape[0]
num_box = gt_bboxs.shape[0]
return (np.tile(proposals.reshape(-1,1), (1,num_box)) != np.tile(gt_bboxs, (num_pps,1)))
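# Worked example: the mask is True wherever the proposal and gt frame indices
# DIFFER, i.e. pairs that must be suppressed before computing overlaps.
# >>> get_frm_mask(np.array([0., 0., 1.]), np.array([0., 1.]))
# array([[False,  True],
#        [False,  True],
#        [ True, False]])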
| ActivityNet-Entities-main | scripts/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Script to preprocess the raw annotation output to NP/object annotation files
import os
import sys
import json
import argparse
import numpy as np
from collections import Counter, defaultdict
from stanfordcorenlp import StanfordCoreNLP
def define_split(database):
    with open(args.train_cap_file) as f:
        train_ids = list(json.load(f).keys())  # list() so the ids can be indexed below
    with open(args.val_cap_file) as f:
        valtest_ids = list(json.load(f).keys())
val_split = np.random.rand(len(valtest_ids))>=0.5 # split a half as the test split
val_ids = [valtest_ids[i] for i,j in enumerate(val_split) if j]
test_ids = [valtest_ids[i] for i,j in enumerate(val_split) if ~j]
vid_ids = set(database.keys())
train_ann_ids = vid_ids.intersection(set(train_ids))
val_ann_ids = vid_ids.intersection(set(val_ids))
test_ann_ids = vid_ids.intersection(set(test_ids))
print('All data - total: {}, train split: {}, val split: {}, test split: {}'.format(len(train_ids+val_ids+test_ids), len(train_ids), len(val_ids), len(test_ids)))
print('Annotated data - total: {}, train split: {}, val split: {}, and test split: {}'.format(
len(vid_ids), len(train_ann_ids), len(val_ann_ids), len(test_ann_ids)))
return [train_ids, val_ids, test_ids]
def extract_attr(database, splits):
split_dict = {}
for split in splits:
split_dict.update({s:s for s in split})
print('Object classes defined on {} videos, freq threshold is {}'.format(len(split_dict), args.freq_thresh))
attr_all = [] # all the attributes
for vid_id, vid in database.items():
if split_dict.get(vid_id, -1) != -1:
for seg_id, seg in vid['segments'].items():
for obj in seg['objects']:
assert(len(obj['frame_ind']) == 1)
for box_id, box in obj['frame_ind'].items():
tmp = []
attr_lst = []
sorted_attr = sorted(box['attributes'], key=lambda x:x[0]) # the attributes are unordered
for ind, attr in enumerate(sorted_attr):
assert(attr[0] >= 0)
if len(tmp) == 0:
tmp.append(attr[1].lower()) # convert to lowercase
else:
if attr[0] == (sorted_attr[ind-1][0]+1):
tmp.append(attr[1].lower())
else:
attr_lst.append(tmp)
tmp = [attr[1].lower()]
if len(tmp) > 0: # the last one
attr_lst.append(tmp)
# exclude empty box (no attribute)
# crowd boxes are ok for now
if len(attr_lst) == 0: # or box['crowds'] == 1
pass
# print('empty attribute at video {}, segment {}, box {}'.format(vid_id, seg_id, box_id))
else:
attr_all.extend([' '.join(i) for i in attr_lst])
return attr_all
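# Sketch of the grouping logic above (hypothetical annotation): attributes
# [(0, 'A'), (1, 'young'), (2, 'man'), (5, 'guitar')] are sorted by word index
# and split wherever the indices stop being consecutive, yielding the phrases
# 'a young man' and 'guitar' (lowercased) in attr_all.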
def prep_all(database, database_cap, obj_cls_lst, w2l, nlp):
w2d = {}
for ind, obj in enumerate(obj_cls_lst):
w2d[obj] = ind
avg_box = [] # number of boxes per segment
avg_attr = [] # number of attributes per box
attr_all = [] # all the attributes
crowd_all = [] # all the crowd labels
attr_dict = defaultdict(list)
with open(args.attr_to_video_file) as f:
for line in f.readlines():
line_split = line.split(',')
attr_id = line_split[0]
vid_name = line_split[-1]
attr = ','.join(line_split[1:-1])
vid_id, seg_id = vid_name.strip().split('_segment_')
attr_dict[(vid_id, str(int(seg_id)))].append([int(attr_id), attr])
print('Number of segments with attributes: {}'.format(len(attr_dict)))
vid_seg_dict = {}
for vid_id, vid in database.items():
for seg_id, _ in vid['segments'].items():
vid_seg_dict[(vid_id, seg_id)] = vid_seg_dict.get((vid_id, seg_id), 0) + 1
new_database = {}
new_database_np = {}
seg_counter = 0
for vid_id, cap in database_cap.items():
new_database_np[vid_id] = {'segments':{}}
new_seg = {}
for cap_id in range(len(cap['sentences'])):
new_obj_lst = defaultdict(list)
seg_id = str(cap_id)
new_database_np[vid_id]['segments'][seg_id] = {'objects':[]}
if vid_seg_dict.get((vid_id, seg_id), 0) == 0:
new_obj_lst['tokens'] = nlp.word_tokenize(cap['sentences'][cap_id].encode('utf-8')) # sentences not in ANet-BB
else:
vid = database[vid_id]
seg = vid['segments'][seg_id]
# preprocess attributes
attr_sent = sorted(attr_dict[(vid_id, seg_id)], key=lambda x:x[0])
start_ind = attr_sent[0][0]
# legacy token issues from our annotation tool
for ind, tup in enumerate(attr_sent):
if attr_sent[ind][1] == '\\,':
attr_sent[ind][1] = ','
new_obj_lst['tokens'] = [i[1] for i in attr_sent] # all the word tokens
for obj in seg['objects']:
assert(len(obj['frame_ind']) == 1)
np_ann = {}
                    box_id = list(obj['frame_ind'].keys())[0]  # Python-3 safe indexing
                    box = list(obj['frame_ind'].values())[0]
np_ann['frame_ind'] = int(box_id)
np_ann.update(box)
if len(box['attributes']) > 0: # just in case the attribute is empty, though it should not be
tmp = []
tmp_ind = []
tmp_obj = []
attr_lst = []
attr_ind_lst = []
tmp_np_ind = []
np_lst = []
sorted_attr = sorted(box['attributes'], key=lambda x:x[0]) # the attributes are unordered
sorted_attr = [(x[0]-start_ind, x[1]) for x in sorted_attr] # index relative to the sent
for ind, attr in enumerate(sorted_attr):
assert(attr[0] >= 0)
attr_w = attr[1].lower()
if len(tmp) == 0:
tmp.append(attr_w) # convert to lowercase
tmp_np_ind.append(attr[0])
if w2l.get(attr_w, -1) != -1:
attr_l = w2l[attr_w]
if w2d.get(attr_l, -1) != -1:
tmp_obj.append(attr_l)
tmp_ind.append(attr[0])
else:
if attr[0] == (sorted_attr[ind-1][0]+1):
tmp.append(attr_w)
tmp_np_ind.append(attr[0])
if w2l.get(attr_w, -1) != -1:
attr_l = w2l[attr_w]
if w2d.get(attr_l, -1) != -1:
tmp_obj.append(attr_l)
tmp_ind.append(attr[0])
else:
np_lst.append([' '.join(tmp), tmp_np_ind])
if len(tmp_obj) >= 1:
attr_lst.append(tmp_obj[-1]) # the last noun is usually the head noun
attr_ind_lst.append(tmp_ind[-1])
tmp = [attr_w]
tmp_np_ind = [attr[0]]
if w2l.get(attr_w, -1) != -1:
attr_l = w2l[attr_w]
if w2d.get(attr_l, -1) != -1:
tmp_obj = [attr_l]
tmp_ind = [attr[0]]
else:
tmp_obj = []
tmp_ind = []
else:
tmp_obj = []
tmp_ind = []
if len(tmp) > 0: # the last one
np_lst.append([' '.join(tmp), tmp_np_ind])
if len(tmp_obj) >= 1:
attr_lst.append(tmp_obj[-1]) # the last noun is usually the head noun
attr_ind_lst.append(tmp_ind[-1])
assert(len(np_lst) > 0)
np_ann['noun_phrases'] = np_lst
np_ann.pop('attributes', None)
new_database_np[vid_id]['segments'][seg_id]['objects'].append(np_ann)
# exclude empty box (no attribute)
# crowd boxes are ok for now
if len(attr_lst) == 0: # or box['crowds'] == 1
pass
# print('empty attribute at video {}, segment {}, box {}'.format(vid_id, seg_id, box_id))
else:
new_obj_lst['process_bnd_box'].append([box['xtl'], box['ytl'], box['xbr'], box['ybr']])
new_obj_lst['frame_ind'].append(int(box_id))
new_obj_lst['crowds'].append(box['crowds'])
new_obj_lst['process_clss'].append(attr_lst)
new_obj_lst['process_idx'].append(attr_ind_lst)
avg_attr.append(len(attr_lst))
attr_all.extend([' '.join(i) for i in attr_lst])
crowd_all.append(box['crowds'])
avg_box.append(len(new_obj_lst['frame_ind'])) # cound be 0
if len(new_obj_lst['frame_ind']) == 0:
new_obj_lst['process_bnd_box'] = []
new_obj_lst['frame_ind'] = [] # all empty
new_obj_lst['crowds'] = []
new_obj_lst['process_clss'] = []
new_obj_lst['process_idx'] = []
seg_counter += 1
new_seg[seg_id] = new_obj_lst
new_database_np[vid_id]['segments'][seg_id]['tokens'] = new_obj_lst['tokens']
new_database[vid_id] = {'segments':new_seg}
# quick stats
print('Number of videos: {} (including empty ones)'.format(len(new_database)))
print('Number of segments: {}'.format(seg_counter))
print('Average number of valid segments per video: {}'.format(np.mean([len(vid['segments']) for vid_id, vid in new_database.items()])))
print('Average number of box per segment: {} and frequency: {}'.format(np.mean(avg_box), Counter(avg_box)))
print('Average number of attributes per box: {} and frequency: {} (for valid box only)'.format(np.mean(avg_attr), Counter(avg_attr)))
crowd_freq = Counter(crowd_all)
print('Percentage of crowds: {} (for valid box only)'.format(crowd_freq[1]*1./(crowd_freq[1]+crowd_freq[0])))
return new_database, new_database_np
def freq_obj_list(attr_all, nlp, props):
# generate a list of object classes
num_nn_per_attr = []
anet_obj_cls = []
nn_wo_noun = [] # noun phrases that contain no nouns
w2lemma = defaultdict(list)
for i, v in enumerate(attr_all):
if i%10000 == 0:
print(i)
out = json.loads(nlp.annotate(v.encode('utf-8'), properties=props))
        assert(len(out['sentences']) > 0)
counter = 0
for token in out['sentences'][0]['tokens']:
if ('NN' in token['pos']) or ('PRP' in token['pos']):
lemma_w = token['lemma']
anet_obj_cls.append(lemma_w)
w2lemma[token['word']].append(lemma_w)
counter += 1
num_nn_per_attr.append(counter)
if counter == 0:
nn_wo_noun.append(v)
top_nn_wo_noun = Counter(nn_wo_noun)
print('Frequency of NPs w/o nouns:')
print(top_nn_wo_noun.most_common(10))
print('Frequency of number of nouns per attribute:')
print(Counter(num_nn_per_attr))
top_obj_cls = Counter(anet_obj_cls)
print('Top 10 objects:', top_obj_cls.most_common(20))
obj_cls_lst = []
for w,freq in top_obj_cls.items():
if freq >= args.freq_thresh:
obj_cls_lst.append(w.encode('ascii'))
w2l = {}
for w, l in w2lemma.items():
# manually correct some machine lemmatization mistakes
spec_w2l = {'outfits':'outfit', 'mariachi':'mariachi', 'barrios':'barrio', 'mans':'man', 'bags':'bag', 'aerobics':'aerobic', 'motobikes':'motobike', 'graffiti':'graffiti', 'semi':'semi', 'los':'los', 'tutus':'tutu'}
if spec_w2l.get(w, -1) != -1: # one special case...
w2l[w] = spec_w2l[w]
print('Ambiguous lemma for: {}'.format(w))
else:
assert(len(set(l)) == 1)
w2l[w] = list(set(l))[0]
print('Number of words derived from lemma visual words {}'.format(len(w2l)))
return obj_cls_lst, w2l
def main(args):
nlp = StanfordCoreNLP(args.corenlp_path)
props={'annotators': 'ssplit, tokenize, lemma','pipelineLanguage':'en', 'outputFormat':'json'}
# load anet captions
with open(args.train_cap_file) as f:
database_cap = json.load(f)
with open(args.val_cap_file) as f:
database_cap.update(json.load(f))
print('Number of videos in ActivityNet Captions (train+val): {}'.format(len(database_cap)))
# load raw annotation output anet bb
with open(args.src_file) as f:
database = json.load(f)['database']
print('Number of videos in ActivityNet-BB (train+val): {}'.format(len(database)))
if os.path.isfile(args.split_file):
with open(args.split_file) as f:
all_splits = json.load(f)
splits = [all_splits['training'], all_splits['validation'], all_splits['testing']]
    else:
        # note: the split-creation code below only runs if this raise is removed
        raise IOError('[WARNING] Cannot find the split file! Remove this raise to create a new split.')
splits = define_split(database)
all_splits = {'training':splits[0], 'validation':splits[1], 'testing':splits[2]}
with open(args.split_file, 'w') as f:
json.dump(all_splits, f)
attr_all = extract_attr(database, splits[:2]) # define object classes on train/val data
obj_cls_lst, w2l = freq_obj_list(attr_all, nlp, props)
new_database, new_database_np = prep_all(database, database_cap, obj_cls_lst, w2l, nlp)
# write raw annotation file
new_database_np = {'database':new_database_np}
with open(args.target_np_file, 'w') as f:
json.dump(new_database_np, f)
# write pre-processed annotation file
new_database = {'vocab':obj_cls_lst, 'annotations':new_database}
with open(args.target_file, 'w') as f:
json.dump(new_database, f)
nlp.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='ActivityNet-Entities dataset preprocessing script.')
parser.add_argument('--dataset_root', type=str, default='/private/home/luoweizhou/subsystem/BottomUpAttnVid/data/anet/', help='dataset root directory')
parser.add_argument('--corenlp_path', type=str, default='/private/home/luoweizhou/subsystem/BottomUpAttn/tools/stanford-corenlp-full-2018-02-27', help='path to stanford core nlp toolkit')
parser.add_argument('--freq_thresh', type=int, default=50, help='frequency threshold for determining object classes')
parser.add_argument('--train_cap_file', type=str, default='/private/home/luoweizhou/subsystem/BottomUpAttnVid/data/anet/raw_annotation_file/train.json')
parser.add_argument('--val_cap_file', type=str, default='/private/home/luoweizhou/subsystem/BottomUpAttnVid/data/anet/raw_annotation_file/val_1.json')
args = parser.parse_args()
args.src_file = args.dataset_root+'anet_bb.json' # the raw annotation file
args.target_np_file = args.dataset_root+'anet_entities.json' # output np file
args.target_file = args.dataset_root+'anet_entities_cleaned_class_thresh'+str(args.freq_thresh)+'.json' # output object file
args.attr_to_video_file = args.dataset_root+'attr_to_video.txt' # from annotation tool
args.split_file = args.dataset_root+'split_ids_anet_entities.json' # split file
np.random.seed(123) # make reproducible
main(args)
| ActivityNet-Entities-main | scripts/attr_prep_tag_NP.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
# Script to print stats on the object annotation file
import numpy as np
import json
import csv
# import visdom
import sys
from collections import Counter
src_file = sys.argv[1] # 'anet_entities_cleaned_class_thresh50_trainval.json'
dataset_file = sys.argv[2] # 'anet_captions_all_splits.json'
split_file = sys.argv[3] # 'split_ids_anet_entities.json'
if __name__=='__main__':
with open(src_file) as f:
data = json.load(f)['annotations']
with open(dataset_file) as f:
raw_data = json.load(f)
split_dict = {}
with open(split_file) as f:
split = json.load(f)
for s,ids in split.items():
split_dict.update({i:s for i in ids})
num_seg = np.sum([len(dat['segments']) for vid, dat in data.items()])
total_box = {}
total_dur = []
seg_splits = {}
box_per_seg = []
obj_per_box = []
count_obj = []
for vid, dat in data.items():
for seg, ann in dat['segments'].items():
total_box[split_dict[vid]] = total_box.get(split_dict[vid], 0)+len(ann['process_bnd_box'])
total_dur.append(float(raw_data[vid]['timestamps'][int(seg)][1]-raw_data[vid]['timestamps'][int(seg)][0]))
seg_splits[split_dict[vid]] = seg_splits.get(split_dict[vid], 0)+1
box_per_seg.append(len(ann['process_bnd_box']))
for c in ann['process_clss']:
obj_per_box.append(len(c))
count_obj.extend(c)
print('number of annotated video: {}'.format(len(data)))
print('number of annotated video segments: {}'.format(num_seg))
print('number of segments in each split: {}'.format(seg_splits))
print('total duration in hr: {}'.format(np.sum(total_dur)/3600))
print('total number of phrase (not object) boxes: {}'.format(total_box))
print('box per segment, mean {}, std {}, count {}'.format(np.mean(box_per_seg), np.std(box_per_seg), Counter(box_per_seg)))
print('object per box, mean {}, std {}, count {}'.format(np.mean(obj_per_box), np.std(obj_per_box), Counter(obj_per_box)))
print('Top 10 object labels: {}'.format(Counter(count_obj).most_common(10)))
"""
# visualization
vis = visdom.Visdom()
vis.histogram(X=[i for i in box_per_seg if i < 20],
opts={'numbins': 20, 'xtickmax':20, 'xtickmin':0, 'xmax':20, 'xmin':0, 'title':'Distribution of number of boxes per segment', 'xtickfont':{'size':14}, \
'ytickfont':{'size':14}, 'xlabel':'Number of boxes', 'ylabel': 'Counts'})
vis.histogram(X=[i for i in obj_per_box if i < 100],
opts={'numbins': 100, 'xtickmax':100, 'xtickmin':0, 'xmax':100, 'xmin':0, 'title':'Distribution of number of object labels per box', 'xtickfont':{'size':14}, \
'ytickfont':{'size':14}, 'xlabel':'Number of object labels', 'ylabel': 'Counts'})
"""
| ActivityNet-Entities-main | scripts/anet_entities_object_stats.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
class Path(object):
"""
User-specific path configuration.
Please complete the /path/to/* paths to point into valid directories.
"""
@staticmethod
def db_root_dir(database=''):
db_root = '/path/to/databases'
db_names = {'PASCAL_MT', 'NYUD_MT', 'BSDS500', 'NYUD_raw',
'PASCAL', 'COCO', 'FSV', 'MSRA10K', 'PASCAL-S'}
if database in db_names:
return os.path.join(db_root, database)
elif not database:
return db_root
else:
raise NotImplementedError
@staticmethod
def save_root_dir():
return './'
@staticmethod
def exp_dir():
return './'
@staticmethod
def models_dir():
return '/path/to/pre-trained/models/'
@staticmethod
def seism_root_dir():
# For edge detection evaluation (optional)
return '/path/to/seism'
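# Illustrative usage (assuming the /path/to/* entries above are filled in):
# >>> from mypath import Path
# >>> Path.db_root_dir('PASCAL_MT')
# '/path/to/databases/PASCAL_MT'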
| astmt-master | mypath.py |
astmt-master | experiments/__init__.py |
|
astmt-master | experiments/classification/__init__.py |
|
astmt-master | experiments/classification/imagenet/__init__.py |
|
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import argparse
import os
import copy
import shutil
import time
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from fblib.util.classification.utils import convert_secs2time, time_string, time_file_str, AverageMeter
from fblib.networks.classification import se_resnet, mobilenet_v2, resnet, resnext
from fblib.util.mypath import Path
def parse_args():
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
parser.add_argument('--data', default=Path.db_root_dir('Imagenet'),
help='path to dataset')
parser.add_argument('--arch', '-a', metavar='ARCH', default='x50')
parser.add_argument('-j', '--workers', default=16, type=int, metavar='N',
help='number of data loading workers (default: 16)')
parser.add_argument('--epochs', default=100, type=int, metavar='N',
help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=256, type=int, metavar='N',
help='mini-batch size (default: 256)')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float, metavar='LR',
help='initial learning rate')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
help='momentum')
parser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W',
help='weight decay (default: 1e-4)')
parser.add_argument('--print-freq', '-p', default=200, type=int, metavar='N',
                        help='print frequency (default: 200)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
help='evaluate model on validation set')
parser.add_argument('--n_gpu', type=int, default=8,
help='number of GPUs')
parser.add_argument('--group_norm', type=str2bool, default=False,
help='Group Normalization')
args = parser.parse_args()
args.prefix = time_file_str()
return args
def main():
args = parse_args()
best_prec1 = 0
if not args.group_norm:
save_dir = os.path.join(Path.exp_dir(), 'imagenet', args.arch)
else:
save_dir = os.path.join(Path.exp_dir(), 'imagenet', args.arch + '-GN')
if not os.path.isdir(save_dir):
os.makedirs(save_dir)
log = open(os.path.join(save_dir, '{}.{}.log'.format(args.arch, args.prefix)), 'w')
# create model
print_log("=> creating model '{}'".format(args.arch), log)
resol = 224
if args.arch == 'res26':
model = resnet.resnet26(pretrained=False, group_norm=args.group_norm)
elif args.arch == 'res50':
model = resnet.resnet50(pretrained=False, group_norm=args.group_norm)
elif args.arch == 'res101':
model = resnet.resnet101(pretrained=False, group_norm=args.group_norm)
elif args.arch == 'x50':
model = resnext.resnext50_32x4d(pretrained=False)
elif args.arch == 'x101':
model = resnext.resnext101_32x4d(pretrained=False)
elif args.arch == 'res26-se':
model = se_resnet.se_resnet26(num_classes=1000)
elif args.arch == 'res50-se':
model = se_resnet.se_resnet50(num_classes=1000)
elif args.arch == 'res101-se':
model = se_resnet.se_resnet101(num_classes=1000)
elif args.arch == 'mobilenet-v2':
        model = mobilenet_v2.mobilenet_v2(pretrained=False, n_class=1000, last_channel=2048)
    else:
        raise NotImplementedError('Unknown architecture: {}'.format(args.arch))
print_log("=> Model : {}".format(model), log)
print_log("=> parameter : {}".format(args), log)
if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
model.features = torch.nn.DataParallel(model.features, device_ids=list(range(args.n_gpu)))
model.cuda()
else:
model = torch.nn.DataParallel(model, device_ids=list(range(args.n_gpu))).cuda()
# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss().cuda()
optimizer = torch.optim.SGD(model.parameters(), args.lr,
momentum=args.momentum,
weight_decay=args.weight_decay,
nesterov=True)
# optionally resume from a checkpoint
if args.resume:
if os.path.isfile(args.resume):
print_log("=> loading checkpoint '{}'".format(args.resume), log)
checkpoint = torch.load(args.resume)
args.start_epoch = checkpoint['epoch']
best_prec1 = checkpoint['best_prec1']
model.load_state_dict(checkpoint['state_dict'])
optimizer.load_state_dict(checkpoint['optimizer'])
print_log("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']), log)
else:
raise ValueError("=> no checkpoint found at '{}'".format(args.resume))
cudnn.benchmark = True
# Data loading code
traindir = os.path.join(args.data, 'train')
valdir = os.path.join(args.data, 'val')
normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
train_dataset = datasets.ImageFolder(
traindir,
transforms.Compose([
transforms.RandomResizedCrop(resol),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
]))
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size, shuffle=True,
num_workers=args.workers, pin_memory=True, sampler=None)
val_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(valdir, transforms.Compose([
transforms.Resize(256),
transforms.CenterCrop(resol),
transforms.ToTensor(),
normalize,
])),
batch_size=args.batch_size, shuffle=False,
num_workers=args.workers, pin_memory=True)
if args.evaluate:
        validate(val_loader, model, criterion, log, args)
return
filename = os.path.join(save_dir, 'checkpoint.{}.{}.pth.tar'.format(args.arch, args.prefix))
bestname = os.path.join(save_dir, 'best.{}.{}.pth.tar'.format(args.arch, args.prefix))
start_time = time.time()
epoch_time = AverageMeter()
for epoch in range(args.start_epoch, args.epochs):
lr = adjust_learning_rate(optimizer, epoch, args)
need_hour, need_mins, need_secs = convert_secs2time(epoch_time.val * (args.epochs - epoch))
need_time = '[Need: {:02d}:{:02d}:{:02d}]'.format(need_hour, need_mins, need_secs)
print_log(' [{:s}] :: {:3d}/{:3d} ----- [{:s}] {:s} LR={:}'.format(args.arch, epoch, args.epochs, time_string(),
need_time, lr), log)
# train for one epoch
train(train_loader, model, criterion, optimizer, epoch, log, args)
# evaluate on validation set
prec1 = validate(val_loader, model, criterion, log, args)
# remember best prec@1 and save checkpoint
is_best = prec1 > best_prec1
best_prec1 = max(prec1, best_prec1)
save_checkpoint({
'epoch': epoch + 1,
'arch': args.arch,
'state_dict': model.state_dict(),
'best_prec1': best_prec1,
'optimizer': optimizer.state_dict(),
'args': copy.deepcopy(args),
}, is_best, filename, bestname)
# measure elapsed time
epoch_time.update(time.time() - start_time)
start_time = time.time()
log.close()
def train(train_loader, model, criterion, optimizer, epoch, log, args):
batch_time = AverageMeter()
data_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to train mode
model.train()
end = time.time()
for i, (input, target) in enumerate(train_loader):
# measure data loading time
data_time.update(time.time() - end)
target_var = target.cuda(non_blocking=True)
input_var = input.requires_grad_()
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target_var, topk=(1, 5))
losses.update(loss.item(), input.size(0))
top1.update(prec1.item(), input.size(0))
top5.update(prec5.item(), input.size(0))
# compute gradient and do SGD step
optimizer.zero_grad()
loss.backward()
optimizer.step()
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print_log('Epoch: [{0}][{1}/{2}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
epoch, i, len(train_loader), batch_time=batch_time,
data_time=data_time, loss=losses, top1=top1, top5=top5), log)
def validate(val_loader, model, criterion, log, args):
batch_time = AverageMeter()
losses = AverageMeter()
top1 = AverageMeter()
top5 = AverageMeter()
# switch to evaluate mode
model.eval()
end = time.time()
with torch.no_grad():
for i, (input_var, target) in enumerate(val_loader):
target_var = target.cuda(non_blocking=True)
# compute output
output = model(input_var)
loss = criterion(output, target_var)
# measure accuracy and record loss
prec1, prec5 = accuracy(output.data, target_var, topk=(1, 5))
losses.update(loss.item(), input_var.size(0))
top1.update(prec1.item(), input_var.size(0))
top5.update(prec5.item(), input_var.size(0))
# measure elapsed time
batch_time.update(time.time() - end)
end = time.time()
if i % args.print_freq == 0:
print_log('Test: [{0}/{1}]\t'
'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
i, len(val_loader), batch_time=batch_time, loss=losses,
top1=top1, top5=top5), log)
print_log(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f} Error@1 {error1:.3f}'.format(top1=top1, top5=top5,
error1=100 - top1.avg), log)
return top1.avg
def save_checkpoint(state, is_best, filename, bestname):
torch.save(state, filename)
if is_best:
shutil.copyfile(filename, bestname)
def adjust_learning_rate(optimizer, epoch, args):
"""Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
lr = args.lr * (0.1 ** (epoch // 30))
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return lr
def accuracy(output, target, topk=(1,)):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def print_log(print_string, log):
print("{}".format(print_string))
log.write('{}\n'.format(print_string))
log.flush()
if __name__ == '__main__':
main()
| astmt-master | experiments/classification/imagenet/train.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from torchvision import transforms
from torch.utils.data import DataLoader
from fblib.util.helpers import worker_seed
import fblib.util.pdf_visualizer as viz
# Losses
from fblib.layers.loss import BalancedCrossEntropyLoss, SoftMaxwithLoss, NormalsLoss, DepthLoss
# Dataloaders
import fblib.dataloaders as dbs
from fblib.dataloaders.combine_im_dbs import CombineIMDBs
from fblib.layers.loss import normal_ize
# Transformations
from fblib.dataloaders import custom_transforms as tr
# Collate for MIL
from fblib.util.custom_collate import collate_mil
def accuracy(output, target, topk=(1,), ignore_label=255):
"""Computes the precision@k for the specified values of k"""
maxk = max(topk)
batch_size = (target != ignore_label).sum().item()
if batch_size == 0:
return -1
_, pred = output.topk(maxk, 1, True, True)
if pred.shape[-1] == 1:
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
else:
correct = pred.eq(target.unsqueeze(1))
res = []
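    # Note: correct[:] below does not slice by k, so each entry of res counts
    # matches over all maxk predictions; with topk=(1,), as used by the
    # training scripts here, this coincides with standard top-1 accuracy.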
for _ in topk:
correct_k = correct[:].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0 / batch_size))
return res
def eval_all_results(p):
if 'DO_SEMSEG' in p and p.DO_SEMSEG:
from fblib.evaluation.eval_semseg import eval_and_store_semseg
for db in p['infer_db_names']:
eval_and_store_semseg(database=db,
save_dir=p['save_dir_root'],
exp_name=p['exp_name'],
overfit=p.overfit)
if 'DO_HUMAN_PARTS' in p and p.DO_HUMAN_PARTS:
from fblib.evaluation.eval_human_parts import eval_and_store_human_parts
for db in p['infer_db_names']:
eval_and_store_human_parts(database=db,
save_dir=p['save_dir_root'],
exp_name=p['exp_name'],
overfit=p.overfit)
if 'DO_NORMALS' in p and p.DO_NORMALS:
from fblib.evaluation.eval_normals import eval_and_store_normals
for db in p['infer_db_names']:
eval_and_store_normals(database=db,
save_dir=p['save_dir_root'],
exp_name=p['exp_name'],
overfit=p.overfit)
if 'DO_SAL' in p and p.DO_SAL:
from fblib.evaluation.eval_sal import eval_and_store_sal
for db in p['infer_db_names']:
eval_and_store_sal(database=db,
save_dir=p['save_dir_root'],
exp_name=p['exp_name'],
overfit=p.overfit)
if 'DO_DEPTH' in p and p.DO_DEPTH:
from fblib.evaluation.eval_depth import eval_and_store_depth
for db in p['infer_db_names']:
eval_and_store_depth(database=db,
save_dir=p['save_dir_root'],
exp_name=p['exp_name'],
overfit=p.overfit)
if 'DO_ALBEDO' in p and p.DO_ALBEDO:
from fblib.evaluation.eval_albedo import eval_and_store_albedo
for db in p['infer_db_names']:
eval_and_store_albedo(database=db,
save_dir=p['save_dir_root'],
exp_name=p['exp_name'],
overfit=p.overfit)
if 'DO_EDGE' in p and p.DO_EDGE and p['eval_edge']:
from fblib.evaluation.eval_edges import sync_and_evaluate_one_folder
for db in p['infer_db_names']:
sync_and_evaluate_one_folder(database=db,
save_dir=p['save_dir_root'],
exp_name=p['exp_name'],
prefix=p['tasks_name'],
all_tasks_present=(p.MINI if 'MINI' in p else False))
def get_transformations(p):
"""
Get the transformations for training and testing
"""
# Training transformations
# Horizontal flips with probability of 0.5
transforms_tr = [tr.RandomHorizontalFlip()]
# Rotations and scaling
transforms_tr.extend([tr.ScaleNRotate(rots=(-20, 20), scales=(.75, 1.25),
flagvals={x: p.TASKS.FLAGVALS[x] for x in p.TASKS.FLAGVALS})])
# Fixed Resize to input resolution
transforms_tr.extend([tr.FixedResize(resolutions={x: tuple(p.TRAIN.SCALE) for x in p.TASKS.FLAGVALS},
flagvals={x: p.TASKS.FLAGVALS[x] for x in p.TASKS.FLAGVALS})])
transforms_tr.extend([tr.AddIgnoreRegions(), tr.ToTensor()])
transforms_tr = transforms.Compose(transforms_tr)
# Testing (during training transforms)
transforms_ts = []
transforms_ts.extend([tr.FixedResize(resolutions={x: tuple(p.TEST.SCALE) for x in p.TASKS.FLAGVALS},
flagvals={x: p.TASKS.FLAGVALS[x] for x in p.TASKS.FLAGVALS})])
transforms_ts.extend([tr.AddIgnoreRegions(), tr.ToTensor()])
transforms_ts = transforms.Compose(transforms_ts)
# Transformations to be used during inference
transforms_infer = transforms_ts
return transforms_tr, transforms_ts, transforms_infer
def get_loss(p, task=None):
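    # Maps a task name to its training criterion, e.g. get_loss(p, 'edge')
    # yields BalancedCrossEntropyLoss with pos_weight=p['edge_w'].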
if task == 'edge':
criterion = BalancedCrossEntropyLoss(size_average=True, pos_weight=p['edge_w'])
elif task == 'semseg' or task == 'human_parts':
criterion = SoftMaxwithLoss()
elif task == 'normals':
criterion = NormalsLoss(normalize=True, size_average=True, norm=p['normloss'])
elif task == 'sal':
criterion = BalancedCrossEntropyLoss(size_average=True)
elif task == 'depth':
criterion = DepthLoss()
elif task == 'albedo':
criterion = torch.nn.L1Loss(reduction='elementwise_mean')
else:
raise NotImplementedError('Undefined Loss: Choose a task among '
'edge, semseg, human_parts, sal, depth, albedo, or normals')
return criterion
def get_train_loader(p, db_name, transforms):
print('Preparing train loader for db: {}'.format(db_name))
db_names = [db_name] if isinstance(db_name, str) else db_name
dbs_train = {}
for db in db_names:
if db == 'PASCALContext':
dbs_train[db] = dbs.PASCALContext(split=['train'], transform=transforms, retname=True,
do_edge=p.DO_EDGE, do_human_parts=p.DO_HUMAN_PARTS,
do_semseg=p.DO_SEMSEG, do_normals=p.DO_NORMALS, do_sal=p.DO_SAL,
overfit=p['overfit'])
elif db == 'VOC12':
dbs_train[db] = dbs.VOC12(split=['train'], transform=transforms, retname=True,
do_semseg=p.DO_SEMSEG, overfit=p['overfit'])
elif db == 'SBD':
dbs_train[db] = dbs.SBD(split=['train', 'val'], transform=transforms, retname=True,
do_semseg=p.DO_SEMSEG, overfit=p['overfit'])
elif db == 'NYUD_nrm':
dbs_train[db] = dbs.NYUDRaw(split='train', transform=transforms, overfit=p['overfit'])
elif db == 'NYUD':
dbs_train[db] = dbs.NYUD_MT(split='train', transform=transforms, do_edge=p.DO_EDGE, do_semseg=p.DO_SEMSEG,
do_normals=p.DO_NORMALS, do_depth=p.DO_DEPTH, overfit=p['overfit'])
elif db == 'COCO':
dbs_train[db] = dbs.COCOSegmentation(split='train2017', transform=transforms, retname=True,
area_range=[1000, float("inf")], only_pascal_categories=True,
overfit=p['overfit'])
elif db == 'FSV':
dbs_train[db] = dbs.FSVGTA(split='train', mini=False, transform=transforms, retname=True,
do_semseg=p.DO_SEMSEG, do_albedo=p.DO_ALBEDO, do_depth=p.DO_DEPTH,
overfit=p['overfit'])
else:
raise NotImplemented("train_db_name: Choose among BSDS500, PASCALContext, VOC12, COCO, FSV, and NYUD")
if len(dbs_train) == 1:
db_train = dbs_train[list(dbs_train.keys())[0]]
else:
db_exclude = dbs.VOC12(split=['val'], transform=transforms, retname=True,
do_semseg=p.DO_SEMSEG, overfit=p['overfit'])
db_train = CombineIMDBs([dbs_train[x] for x in dbs_train], excluded=[db_exclude], repeat=[1, 1])
trainloader = DataLoader(db_train, batch_size=p['trBatch'], shuffle=True, drop_last=True,
num_workers=4, worker_init_fn=worker_seed, collate_fn=collate_mil)
return trainloader
def get_test_loader(p, db_name, transforms, infer=False):
print('Preparing test loader for db: {}'.format(db_name))
if db_name == 'BSDS500':
db_test = dbs.BSDS500(split=['test'], transform=transforms, overfit=p['overfit'])
elif db_name == 'PASCALContext':
db_test = dbs.PASCALContext(split=['val'], transform=transforms,
retname=True, do_edge=p.DO_EDGE, do_human_parts=p.DO_HUMAN_PARTS,
do_semseg=p.DO_SEMSEG, do_normals=p.DO_NORMALS, do_sal=p.DO_SAL,
overfit=p['overfit'])
elif db_name == 'VOC12':
db_test = dbs.VOC12(split=['val'], transform=transforms,
retname=True, do_semseg=p.DO_SEMSEG, overfit=p['overfit'])
elif db_name == 'NYUD':
db_test = dbs.NYUD_MT(split='val', transform=transforms, do_edge=p.DO_EDGE, do_semseg=p.DO_SEMSEG,
do_normals=p.DO_NORMALS, do_depth=p.DO_DEPTH, overfit=p['overfit'])
elif db_name == 'COCO':
db_test = dbs.COCOSegmentation(split='val2017', transform=transforms, retname=True,
area_range=[1000, float("inf")], only_pascal_categories=True,
overfit=p['overfit'])
elif db_name == 'FSV':
db_test = dbs.FSVGTA(split='test', mini=True, transform=transforms, retname=True,
do_semseg=p.DO_SEMSEG, do_albedo=p.DO_ALBEDO, do_depth=p.DO_DEPTH,
overfit=p['overfit'])
else:
raise NotImplemented("test_db_name: Choose among BSDS500, PASCALContext, VOC12, COCO, FSV, and NYUD")
drop_last = False if infer else True
testloader = DataLoader(db_test, batch_size=p.TEST.BATCH_SIZE, shuffle=False, drop_last=drop_last,
num_workers=2, worker_init_fn=worker_seed)
return testloader
def get_output(output, task):
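    # Converts network output to channel-last numpy predictions: 'normals' are
    # normalized and rescaled to [0, 255], 'semseg'/'human_parts' are argmaxed
    # to label maps, 'edge'/'sal' become sigmoid maps in [0, 255], and 'depth'
    # passes through unchanged.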
output = output.permute(0, 2, 3, 1)
if task == 'normals':
output = (normal_ize(output, dim=3) + 1.0) * 255 / 2.0
elif task in {'semseg', 'human_parts'}:
_, output = torch.max(output, dim=3)
elif task in {'edge', 'sal'}:
output = torch.squeeze(255 * 1 / (1 + torch.exp(-output)))
elif task in {'depth'}:
pass
else:
raise ValueError('Select one of the valid tasks')
return output.cpu().data.numpy()
| astmt-master | experiments/dense_predict/common_configs.py |
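# Maximum images per GPU, keyed by '<backbone>-<output stride>'
# (presumably tuned to GPU memory).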
MAX_N_IMAGES_PER_GPU = {
'res26-8': 8,
'res26-16': 12,
'res50-8': 8,
'res50-16': 10,
'res101-8': 4,
'res101-16': 10,
'x50-8': 4,
'x50-16': 10,
'x101-8': 2,
'x101-16': 6,
}
| astmt-master | experiments/dense_predict/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import sys
import cv2
import argparse
import torch
import tarfile
from six.moves import urllib
from easydict import EasyDict as edict
# Networks
import fblib.networks.deeplab_multi_task.deeplab_se_resnet_multitask as se_resnet_multitask
# Common configs
from experiments.dense_predict.common_configs import get_loss, get_train_loader, get_test_loader, get_transformations
from fblib.util.mypath import Path
def parse_args():
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(description='Multi-task learning with PASCAL')
# Select tasks
parser.add_argument('--active_tasks', type=int, nargs='+', default=[1, 1, 1, 1, 1],
help='Which tasks to train?')
parser.add_argument('--onlyVOC', type=str2bool, default=False,
help='Use standard training and testing for semantic segmentation')
# General parameters
parser.add_argument('--arch', type=str, default='se_res26',
help='network: se_res26, se_res50, se_res101')
parser.add_argument('--pretr', type=str, default='imagenet',
help='pre-trained model: "imagenet" or "scratch"')
parser.add_argument('--trBatch', type=int, default=8,
help='training batch size')
parser.add_argument('-lr', type=float, default=0.001,
help='initial learning rate. poly-learning rate is used.')
parser.add_argument('--lr_dec', type=float, default=1,
help='decoder learning rate multiplier')
parser.add_argument('-wd', type=float, default=1e-4,
help='Weight decay')
parser.add_argument('--epochs', type=int, default=60,
help='Total number of epochs for training')
parser.add_argument('--resume_epoch', type=int, default=0,
help='Resume Epoch #')
parser.add_argument('--cls', type=str, default='atrous-v3',
help='Classifier type')
parser.add_argument('--stride', type=int, default=16,
help='Output stride of ResNet backbone. If set to 16 saves significant memory')
parser.add_argument('--trNorm', type=str2bool, default=True,
help='train normalization layers of Backbone?')
parser.add_argument('--dec_w', type=int, default=64,
                        help='decoder width (default 256 in Deeplab v3+)')
parser.add_argument('--overfit', type=str2bool, default=False,
help='overfit to small subset of data for debugging purposes')
# Modulation Parameters
parser.add_argument('--seenc', type=str2bool, default=True,
help='Squeeze and excitation per task for encoder? False will still use 1 SE for all tasks')
parser.add_argument('--sedec', type=str2bool, default=True,
help='Squeeze and excitation per task for decoder? False will not use SE modules')
parser.add_argument('--adapt', type=str2bool, default=True,
help='Use parallel residual adapters?')
parser.add_argument('--lr_tsk', type=float, default=-1,
help='Task Specific layer learning rate multiplier')
# Discriminator parameters
parser.add_argument('--dscr', type=str, default='fconv',
help='Use discriminator?')
parser.add_argument('--lr_dscr', type=int, default=10,
help='learning rate multiplier of discriminator?')
parser.add_argument('--dscr_w', type=float, default=0.01,
help='weight of discriminator in the range [0, 1]')
parser.add_argument('--dscrd', type=int, default=2,
help='Depth of discriminator')
parser.add_argument('--dscrk', type=int, default=1,
help='kernel size of discriminator')
# Task-specific parameters
parser.add_argument('--edge_w', type=float, default=0.95,
help='weighting the positive loss for boundary detection as w * L_pos + (1 - w) * L_neg')
return parser.parse_args()
def create_config():
cfg = edict()
args = parse_args()
# Parse tasks
assert (len(args.active_tasks) == 5)
args.do_edge = args.active_tasks[0]
args.do_semseg = args.active_tasks[1]
args.do_human_parts = args.active_tasks[2]
args.do_normals = args.active_tasks[3]
args.do_sal = args.active_tasks[4]
print('\nThis script was run with the following parameters:')
for x in vars(args):
print('{}: {}'.format(x, str(getattr(args, x))))
cfg.resume_epoch = args.resume_epoch
cfg.DO_EDGE = args.do_edge
cfg.DO_SEMSEG = args.do_semseg
cfg.DO_HUMAN_PARTS = args.do_human_parts
cfg.DO_NORMALS = args.do_normals
cfg.DO_SAL = args.do_sal
if not cfg.DO_EDGE and not cfg.DO_SEMSEG and not cfg.DO_HUMAN_PARTS and not cfg.DO_NORMALS and not cfg.DO_SAL:
raise ValueError("Select at least one task")
cfg['arch'] = args.arch
cfg['pretr'] = args.pretr
cfg['trBatch'] = args.trBatch
cfg['lr'] = args.lr
cfg['lr_dec'] = args.lr_dec
cfg['wd'] = args.wd
cfg['cls'] = args.cls
cfg['epochs'] = args.epochs
cfg['stride'] = args.stride
cfg['trNorm'] = args.trNorm
cfg['dec_w'] = args.dec_w
# Set Modulation (Squeeze and Exciation, Residual Adapters) parameters
cfg['seenc'] = args.seenc
cfg['sedec'] = args.sedec
cfg['adapters'] = args.adapt
if cfg['sedec']:
cfg['norm_per_task'] = True
else:
cfg['norm_per_task'] = False
if args.dscr == 'None':
args.dscr = None
cfg['dscr_type'] = args.dscr
cfg['lr_dscr'] = args.lr_dscr
cfg['dscr_w'] = args.dscr_w
cfg['dscrd'] = args.dscrd
cfg['dscrk'] = args.dscrk
task_args, name_args = get_exp_name(args)
cfg['exp_folder_name'] = 'pascal_resnet'
cfg['exp_name'] = "_".join(name_args)
cfg['tasks_name'] = "_".join(task_args)
cfg['save_dir_root'] = os.path.join(Path.exp_dir(), cfg['exp_folder_name'], cfg['tasks_name'])
if args.onlyVOC:
cfg['train_db_name'] = ['VOC12', 'SBD']
cfg['test_db_name'] = 'VOC12'
cfg['infer_db_names'] = ['VOC12', ]
else:
cfg['train_db_name'] = ['PASCALContext', ]
cfg['test_db_name'] = 'PASCALContext'
cfg['infer_db_names'] = ['PASCALContext', ]
# Which tasks?
cfg.TASKS = edict()
cfg.TASKS.NAMES = []
cfg.TASKS.NUM_OUTPUT = {} # How many outputs per task?
cfg.TASKS.TB_MIN = {}
cfg.TASKS.TB_MAX = {}
cfg.TASKS.LOSS_MULT = {}
cfg.TASKS.FLAGVALS = {'image': cv2.INTER_CUBIC}
cfg.TASKS.INFER_FLAGVALS = {}
if cfg.DO_EDGE:
# Edge Detection
print('Adding task: Edge Detection')
tmp = 'edge'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 1
cfg.TASKS.TB_MIN[tmp] = 0
cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp]
cfg.TASKS.LOSS_MULT[tmp] = 50
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR
# Add task-specific parameters from parser
cfg['edge_w'] = args.edge_w
cfg['eval_edge'] = False
if cfg.DO_SEMSEG:
# Semantic Segmentation
print('Adding task: Semantic Segmentation')
tmp = 'semseg'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 21
cfg.TASKS.TB_MIN[tmp] = 0
cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp] - 1
cfg.TASKS.LOSS_MULT[tmp] = 1
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_NEAREST
if cfg.DO_HUMAN_PARTS:
# Human Parts Segmentation
print('Adding task: Human Part Segmentation')
tmp = 'human_parts'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 7
cfg.TASKS.TB_MIN[tmp] = 0
cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp] - 1
cfg.TASKS.LOSS_MULT[tmp] = 2
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_NEAREST
if cfg.DO_NORMALS:
        # Surface Normals Estimation
print('Adding task: Normals')
tmp = 'normals'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 3
cfg.TASKS.TB_MIN[tmp] = -1
cfg.TASKS.TB_MAX[tmp] = 1
cfg.TASKS.LOSS_MULT[tmp] = 10
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_CUBIC
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR
cfg['normloss'] = 1 # Hard-coded L1 loss for normals
if cfg.DO_SAL:
# Saliency Estimation
print('Adding task: Saliency')
tmp = 'sal'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 1
cfg.TASKS.TB_MIN[tmp] = 0
cfg.TASKS.TB_MAX[tmp] = 1
cfg.TASKS.LOSS_MULT[tmp] = 5
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR
cfg['lr_tsk'] = len(cfg.TASKS.NAMES) if args.lr_tsk < 0 else args.lr_tsk
cfg.NETWORK = edict()
# Visualize the network on Tensorboard / pdf?
cfg.NETWORK.VIS_NET = False
cfg.TRAIN = edict()
cfg.TRAIN.SCALE = (512, 512)
cfg.TRAIN.MOMENTUM = 0.9
cfg.TRAIN.TENS_VIS = True
cfg.TRAIN.TENS_VIS_INTER = 1000
cfg.TRAIN.TEMP_LOSS_INTER = 1000
cfg.TEST = edict()
# See evolution of the test set when training?
cfg.TEST.USE_TEST = True
cfg.TEST.TEST_INTER = 10
cfg.TEST.SCALE = (512, 512)
cfg.SEED = 0
cfg.EVALUATE = True
cfg.DEBUG = False
cfg['overfit'] = args.overfit
if cfg['overfit']:
cfg['save_dir_root'] = os.path.join(Path.exp_dir(), cfg['exp_folder_name'])
cfg['exp_name'] = 'test'
cfg['save_dir'] = os.path.join(cfg['save_dir_root'], cfg['exp_name'])
return cfg
def check_downloaded(p):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> %s %.1f%%' %
(_fpath, float(count * block_size) /
float(total_size) * 100.0))
sys.stdout.flush()
def _create_url(name):
return 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL//models/astmt/{}.tgz'.format(name)
_model_urls = {
'pascal_resnet_edge_semseg_human_parts_normals_sal_'
'arch-se_res26_pretr-imagenet_trBatch-8_lr-0.001_epochs-60_trNorm_seenc_sedec_adapt_dscr-fconv_lr_dscr'
'-10_dscr_w-0.01_dscrd-2_dscrk-1_edge_w-0.95_60',
}
ans = False
_check = p['exp_folder_name'] + '_' + p['tasks_name'] + '_' + p['exp_name'] + '_' + str(p['resume_epoch'])
_fpath = os.path.join(Path.exp_dir(), _check + '.tgz')
if _check in _model_urls:
if not os.path.isfile(os.path.join(p['save_dir'], 'models',
'model_epoch-' + str(p['resume_epoch'] - 1) + '.pth')):
urllib.request.urlretrieve(_create_url(_check), _fpath, _progress)
# extract file
cwd = os.getcwd()
print('\nExtracting tar file')
tar = tarfile.open(_fpath)
os.chdir(Path.exp_dir())
tar.extractall()
tar.close()
os.chdir(cwd)
print('Done!')
ans = True
return ans
def get_net_resnet(p):
"""
    Define the network (DeepLab with SE-ResNet backbone) and the trainable parameters
"""
if p['arch'] == 'se_res26':
network = se_resnet_multitask.se_resnet26
elif p['arch'] == 'se_res50':
network = se_resnet_multitask.se_resnet50
elif p['arch'] == 'se_res101':
network = se_resnet_multitask.se_resnet101
else:
        raise NotImplementedError('ResNet: Choose among se_res26, se_res50, and se_res101')
print('Creating ResNet model: {}'.format(p.NETWORK))
net = network(tasks=p.TASKS.NAMES, n_classes=p.TASKS.NUM_OUTPUT, pretrained=p['pretr'], classifier=p['cls'],
output_stride=p['stride'], train_norm_layers=p['trNorm'], width_decoder=p['dec_w'],
squeeze_enc=p['seenc'], squeeze_dec=p['sedec'], adapters=p['adapters'],
norm_per_task=p['norm_per_task'], dscr_type=p['dscr_type'], dscr_d=p['dscrd'], dscr_k=p['dscrk'])
if p['resume_epoch'] != 0:
print("Initializing weights from: {}".format(
os.path.join(p['save_dir'], 'models', 'model_epoch-' + str(p['resume_epoch'] - 1) + '.pth')))
state_dict_checkpoint = torch.load(
os.path.join(p['save_dir'], 'models', 'model_epoch-' + str(p['resume_epoch'] - 1) + '.pth')
, map_location=lambda storage, loc: storage)
net.load_state_dict(state_dict_checkpoint)
return net
def get_train_params(net, p):
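    # Parameter groups (sketch): backbone at the base lr, decoder at
    # lr * lr_dec, task-specific heads at lr * lr_tsk, and the discriminator
    # (when enabled) at lr * lr_dscr.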
train_params = [{'params': se_resnet_multitask.get_lr_params(net, part='backbone', tasks=p.TASKS.NAMES),
'lr': p['lr']},
{'params': se_resnet_multitask.get_lr_params(net, part='decoder', tasks=p.TASKS.NAMES),
'lr': p['lr'] * p['lr_dec']},
{'params': se_resnet_multitask.get_lr_params(net, part='task_specific', tasks=p.TASKS.NAMES),
'lr': p['lr'] * p['lr_tsk']}]
if p['dscr_type'] is not None:
train_params.append(
{'params': se_resnet_multitask.get_lr_params(net, part='discriminator', tasks=p.TASKS.NAMES),
'lr': p['lr'] * p['lr_dscr']})
return train_params
def get_exp_name(args):
"""
    Creates the experiment name from the configuration and the command-line arguments
"""
task_dict = {
'do_edge': 0,
'do_semseg': 0,
'do_human_parts': 0,
'do_normals': 0,
'do_sal': 0
}
name_dict = {
'arch': None,
'onlyVOC': False,
'pretr': None,
'trBatch': None,
'lr': None,
'wd': 1e-04,
'epochs': None,
'cls': 'atrous-v3',
'stride': 16,
'trNorm': False,
'dec_w': 64,
'seenc': False,
'sedec': False,
'adapt': False,
'dscr': None,
'lr_dscr': 1,
'dscr_w': ('dscr', None),
'dscrd': ('dscr', None),
'dscrk': ('dscr', None),
'edge_w': ('do_edge', None),
'lr_dec': 1,
'lr_tsk': -1,
}
# Experiment folder (task) string
task_args = [x.replace('do_', '') for x in task_dict if getattr(args, x) != task_dict[x]]
# Experiment name string
name_args = []
for x in name_dict:
# Check dependencies in tuple
if isinstance(name_dict[x], tuple):
elem = name_dict if name_dict[x][0] in name_dict else task_dict
if elem[name_dict[x][0]] == getattr(args, name_dict[x][0]):
continue
if getattr(args, x) != name_dict[x]:
tmp = getattr(args, x)
if isinstance(tmp, list):
tmp = "_".join([str(x) for x in tmp])
else:
tmp = str(tmp)
name_args.append(x + '-' + tmp)
name_args = [x.replace('-True', '') for x in name_args]
return task_args, name_args
| astmt-master | experiments/dense_predict/pascal_resnet/config.py |
MAX_N_IMAGES_PER_GPU = {
'se_res26-8': 10,
'se_res26-16': 16,
'se_res50-8': 8,
'se_res50-16': 10,
'se_res101-8': 2,
'se_res101-16': 8,
}
| astmt-master | experiments/dense_predict/pascal_resnet/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import socket
import timeit
import cv2
from datetime import datetime
import imageio
import numpy as np
# PyTorch includes
import torch
import torch.optim as optim
from torch.nn.functional import interpolate
# Custom includes
from fblib.util.helpers import generate_param_report
from fblib.util.dense_predict.utils import lr_poly
from experiments.dense_predict import common_configs
from fblib.util.mtl_tools.multitask_visualizer import TBVisualizer, visualize_network
from fblib.util.model_resources.flops import compute_gflops
from fblib.util.model_resources.num_parameters import count_parameters
from fblib.util.dense_predict.utils import AverageMeter
# Custom optimizer
from fblib.util.optimizer_mtl.select_used_modules import make_closure
# Configuration file
from experiments.dense_predict.pascal_resnet import config as config
# Tensorboard include
from tensorboardX import SummaryWriter
def main():
p = config.create_config()
gpu_id = 0
device = torch.device("cuda:" + str(gpu_id) if torch.cuda.is_available() else "cpu")
p.TEST.BATCH_SIZE = 32
# Setting parameters
n_epochs = p['epochs']
print("Total training epochs: {}".format(n_epochs))
print(p)
print('Training on {}'.format(p['train_db_name']))
snapshot = 10 # Store a model every snapshot epochs
test_interval = p.TEST.TEST_INTER # Run on test set every test_interval epochs
torch.manual_seed(p.SEED)
exp_name = os.path.dirname(os.path.abspath(__file__)).split('/')[-1]
if not os.path.exists(os.path.join(p['save_dir'], 'models')):
if p['resume_epoch'] == 0:
os.makedirs(os.path.join(p['save_dir'], 'models'))
else:
if not config.check_downloaded(p):
            print('Folder does not exist. No checkpoint to resume from. Exiting.')
exit(1)
net = config.get_net_resnet(p)
# Visualize the network
if p.NETWORK.VIS_NET:
visualize_network(net, p)
gflops = compute_gflops(net, in_shape=(p['trBatch'], 3, p.TRAIN.SCALE[0], p.TRAIN.SCALE[1]),
tasks=p.TASKS.NAMES[0])
print('GFLOPS per task: {}'.format(gflops / p['trBatch']))
print('\nNumber of parameters (in millions): {0:.3f}'.format(count_parameters(net) / 1e6))
print('Number of parameters (in millions) for decoder: {0:.3f}\n'.format(count_parameters(net.decoder) / 1e6))
net.to(device)
if p['resume_epoch'] != n_epochs:
criteria_tr = {}
criteria_ts = {}
running_loss_tr = {task: 0. for task in p.TASKS.NAMES}
running_loss_ts = {task: 0. for task in p.TASKS.NAMES}
curr_loss_task = {task: 0. for task in p.TASKS.NAMES}
counter_tr = {task: 0 for task in p.TASKS.NAMES}
counter_ts = {task: 0 for task in p.TASKS.NAMES}
# Discriminator loss variables for logging
running_loss_tr_dscr = 0
running_loss_ts_dscr = 0
# Logging into Tensorboard
log_dir = os.path.join(p['save_dir'], 'models',
datetime.now().strftime('%b%d_%H-%M-%S') + '_' + socket.gethostname())
writer = SummaryWriter(log_dir=log_dir)
# Training parameters and their optimizer
train_params = config.get_train_params(net, p)
optimizer = optim.SGD(train_params, lr=p['lr'], momentum=p.TRAIN.MOMENTUM, weight_decay=p['wd'])
for task in p.TASKS.NAMES:
# Losses
criteria_tr[task] = config.get_loss(p, task)
criteria_ts[task] = config.get_loss(p, task)
criteria_tr[task].to(device)
criteria_ts[task].to(device)
# Preparation of the data loaders
transforms_tr, transforms_ts, _ = config.get_transformations(p)
trainloader = config.get_train_loader(p, db_name=p['train_db_name'], transforms=transforms_tr)
testloader = config.get_test_loader(p, db_name=p['test_db_name'], transforms=transforms_ts)
# TensorBoard Image Visualizer
tb_vizualizer = TBVisualizer(tasks=p.TASKS.NAMES, min_ranges=p.TASKS.TB_MIN, max_ranges=p.TASKS.TB_MAX,
batch_size=p['trBatch'])
generate_param_report(os.path.join(p['save_dir'], exp_name + '.txt'), p)
# Train variables
num_img_tr = len(trainloader)
num_img_ts = len(testloader)
print("Training Network")
# Main Training and Testing Loop
for epoch in range(p['resume_epoch'], n_epochs):
top1_dscr = AverageMeter()
start_time = timeit.default_timer()
# One training epoch
net.train()
        alpha = 2. / (1. + np.exp(-10 * ((epoch + 1) / n_epochs))) - 1  # Ganin et al. schedule for gradient reversal
if p['dscr_type'] is not None:
print('Value of alpha: {}'.format(alpha))
for ii, sample in enumerate(trainloader):
curr_loss_dscr = 0
# Grab the input
inputs = sample['image']
inputs.requires_grad_()
inputs = inputs.to(device)
task_gts = list(sample.keys())
tasks = net.tasks
gt_elems = {x: sample[x].to(device, non_blocking=True) for x in tasks}
uniq = {x: gt_elems[x].unique() for x in gt_elems}
outputs = {}
for task in tasks:
if task not in task_gts:
continue
if len(uniq[task]) == 1 and uniq[task][0] == 255:
continue
# Forward pass
output = {}
features = {}
output[task], features[task] = net.forward(inputs, task=task)
losses_tasks, losses_dscr, outputs_dscr, grads, task_labels \
= net.compute_losses(output, features, criteria_tr, gt_elems, alpha, p)
loss_tasks = losses_tasks[task]
running_loss_tr[task] += losses_tasks[task].item()
curr_loss_task[task] = losses_tasks[task].item()
counter_tr[task] += 1
# Store output for logging
outputs[task] = output[task].detach()
if p['dscr_type'] is not None:
# measure loss, accuracy and record accuracy for discriminator
loss_dscr = losses_dscr[task]
running_loss_tr_dscr += losses_dscr[task].item()
curr_loss_dscr += loss_dscr.item()
prec1 = common_configs.accuracy(outputs_dscr[task].data, task_labels[task], topk=(1,))
if prec1 != -1:
top1_dscr.update(prec1[0].item(), task_labels[task].size(0))
loss = (1 - p['dscr_w']) * loss_tasks + p['dscr_w'] * loss_dscr
else:
loss = loss_tasks
# Backward pass inside make_closure to update only weights that were used during fw pass
optimizer.zero_grad()
optimizer.step(closure=make_closure(loss=loss, net=net))
# Print stuff and log epoch loss into Tensorboard
if ii % num_img_tr == num_img_tr - 1:
print('[Epoch: %d, numImages: %5d]' % (epoch, ii + 1))
for task in p.TASKS.NAMES:
running_loss_tr[task] = running_loss_tr[task] / counter_tr[task]
writer.add_scalar('data/total_loss_epoch' + task,
running_loss_tr[task] / p.TASKS.LOSS_MULT[task], epoch)
print('Loss %s: %f' % (task, running_loss_tr[task]))
running_loss_tr[task] = 0
counter_tr[task] = 0
if p['dscr_type'] is not None:
running_loss_tr_dscr = running_loss_tr_dscr / num_img_tr / len(p.TASKS.NAMES)
writer.add_scalar('data/total_loss_epoch_dscr', running_loss_tr_dscr, epoch)
print('Loss Discriminator: %f' % running_loss_tr_dscr)
print('Train Accuracy Discriminator: Prec@1 {top1.avg:.3f} Error@1 {error1:.3f}'.format(
top1=top1_dscr, error1=100 - top1_dscr.avg))
writer.add_scalar('data/train_accuracy_dscr', top1_dscr.avg, epoch)
stop_time = timeit.default_timer()
print("Execution time: " + str(stop_time - start_time) + "\n")
# Log current train loss into Tensorboard
for task in p.TASKS.NAMES:
writer.add_scalar('data/train_loss_iter_' + task, curr_loss_task[task], ii + num_img_tr * epoch)
curr_loss_task[task] = 0.
if p['dscr_type'] is not None:
writer.add_scalar('data/train_loss_dscr_iter', curr_loss_dscr, ii + num_img_tr * epoch)
curr_loss_dscr = 0.
# Log train images to Tensorboard
if p['overfit'] and p.TRAIN.TENS_VIS and ii % p.TRAIN.TENS_VIS_INTER == 0:
curr_iter = ii + num_img_tr * epoch
tb_vizualizer.visualize_images_tb(writer, sample, outputs,
global_step=curr_iter, tag=ii, phase='train')
if ii % num_img_tr == num_img_tr - 1:
lr_ = lr_poly(p['lr'], iter_=epoch, max_iter=n_epochs)
print('(poly lr policy) learning rate: {0:.6f}'.format(lr_))
train_params = config.get_train_params(net, p)
optimizer = optim.SGD(train_params, lr=lr_, momentum=p.TRAIN.MOMENTUM, weight_decay=p['wd'])
optimizer.zero_grad()
# Save the model
if (epoch % snapshot) == snapshot - 1 and epoch != 0:
torch.save(net.state_dict(), os.path.join(p['save_dir'], 'models',
'model_epoch-' + str(epoch) + '.pth'))
# One testing epoch
if p.TEST.USE_TEST and epoch % test_interval == (test_interval - 1):
print('Testing Phase')
top1_dscr = AverageMeter()
net.eval()
start_time = timeit.default_timer()
for ii, sample in enumerate(testloader):
inputs = sample['image'].to(device)
task_gts = list(sample.keys())
tasks = net.tasks
gt_elems = {x: sample[x].to(device, non_blocking=True) for x in tasks}
uniq = {x: gt_elems[x].unique() for x in gt_elems}
outputs = {}
for task in tasks:
if task not in task_gts:
continue
if len(uniq[task]) == 1 and uniq[task][0] == 255:
continue
# forward pass of the mini-batch
output = {}
features = {}
output[task], features[task] = net.forward(inputs, task=task)
losses_tasks, losses_dscr, outputs_dscr, grads, task_labels \
= net.compute_losses(output, features, criteria_tr, gt_elems, alpha, p)
running_loss_ts[task] += losses_tasks[task].item()
counter_ts[task] += 1
# for logging
outputs[task] = output[task].detach()
if p['dscr_type'] is not None:
running_loss_ts_dscr += losses_dscr[task].item()
# measure accuracy and record loss for discriminator
prec1 = common_configs.accuracy(outputs_dscr[task].data, task_labels[task], topk=(1,))
if prec1 != -1:
top1_dscr.update(prec1[0].item(), task_labels[task].size(0))
# Print stuff
if ii % num_img_ts == num_img_ts - 1:
print('[Epoch: %d, numTestImages: %5d]' % (epoch, ii + 1))
for task in p.TASKS.NAMES:
running_loss_ts[task] = running_loss_ts[task] / counter_ts[task]
writer.add_scalar('data/test_loss_' + task + '_epoch',
running_loss_ts[task] / p.TASKS.LOSS_MULT[task], epoch)
print('Testing Loss %s: %f' % (task, running_loss_ts[task]))
running_loss_ts[task] = 0
counter_ts[task] = 0
# Free the graph
losses_tasks = {}
if p['dscr_type'] is not None:
running_loss_ts_dscr = running_loss_ts_dscr / num_img_ts / len(p.TASKS.NAMES)
writer.add_scalar('data/test_loss_dscr', running_loss_ts_dscr, epoch)
print('Loss Discriminator: %f' % running_loss_ts_dscr)
writer.add_scalar('data/test_accuracy_dscr', top1_dscr.avg, epoch)
print('Test Accuracy Discriminator: Prec@1 {top1.avg:.3f} Error@1 {error1:.3f}'.format(
top1=top1_dscr, error1=100 - top1_dscr.avg))
# Free the graph
losses_dscr = {}
stop_time = timeit.default_timer()
print("Execution time: " + str(stop_time - start_time) + "\n")
# Log test images to Tensorboard
if p.TRAIN.TENS_VIS and ii % p.TRAIN.TENS_VIS_INTER == 0:
curr_iter = ii + num_img_tr * epoch
tb_vizualizer.visualize_images_tb(writer, sample, outputs,
global_step=curr_iter, tag=ii, phase='test')
writer.close()
# Generate Results
net.eval()
_, _, transforms_infer = config.get_transformations(p)
for db_name in p['infer_db_names']:
testloader = config.get_test_loader(p, db_name=db_name, transforms=transforms_infer, infer=True)
save_dir_res = os.path.join(p['save_dir'], 'Results_' + db_name)
print('Testing Network')
# Main Testing Loop
with torch.no_grad():
for ii, sample in enumerate(testloader):
img, meta = sample['image'], sample['meta']
# Forward pass of the mini-batch
inputs = img.to(device)
tasks = net.tasks
for task in tasks:
output, _ = net.forward(inputs, task=task)
save_dir_task = os.path.join(save_dir_res, task)
if not os.path.exists(save_dir_task):
os.makedirs(save_dir_task)
output = interpolate(output, size=(inputs.size()[-2], inputs.size()[-1]),
mode='bilinear', align_corners=False)
output = common_configs.get_output(output, task)
for jj in range(int(inputs.size()[0])):
if len(sample[task][jj].unique()) == 1 and sample[task][jj].unique() == 255:
continue
fname = meta['image'][jj]
result = cv2.resize(output[jj], dsize=(meta['im_size'][1][jj], meta['im_size'][0][jj]),
interpolation=p.TASKS.INFER_FLAGVALS[task])
imageio.imwrite(os.path.join(save_dir_task, fname + '.png'), result.astype(np.uint8))
if p.EVALUATE:
common_configs.eval_all_results(p)
if __name__ == '__main__':
main()
| astmt-master | experiments/dense_predict/pascal_resnet/main.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import sys
import cv2
import argparse
import torch
import tarfile
from six.moves import urllib
from easydict import EasyDict as edict
# Networks
import fblib.networks.deeplab_multi_task.deeplab_se_mobilenet_v2_multitask as se_mobilenet_v2
# Common configs
from experiments.dense_predict.common_configs import get_loss, get_train_loader, get_test_loader, get_transformations
from fblib.util.mypath import Path
def parse_args():
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(description='Multi-task Learning with PASCAL and MobileNet-v2')
# Select tasks
parser.add_argument('--active_tasks', type=int, nargs='+', default=[1, 1, 1, 1, 1],
help='Which tasks to train?')
# General parameters
parser.add_argument('--arch', type=str, default='mnetv2',
help='network: Mobilenet v2')
parser.add_argument('--pretr', type=str, default='imagenet',
help='pre-trained model: "imagenet" or "scratch"')
parser.add_argument('--trBatch', type=int, default=16,
help='training batch size')
parser.add_argument('-lr', type=float, default=0.001,
help='initial learning rate. poly-learning rate is used.')
parser.add_argument('--lr_dec', type=float, default=1,
help='decoder learning rate multiplier')
parser.add_argument('-wd', type=float, default=1e-4,
help='Weight decay')
parser.add_argument('--epochs', type=int, default=130,
help='total number of epochs for training')
parser.add_argument('--resume_epoch', type=int, default=0,
help='Resume Epoch #')
parser.add_argument('--stride', type=int, default=16,
help='output stride of ResNet backbone. If set to 16 saves significant memory')
parser.add_argument('--trNorm', type=str2bool, default=True,
help='train normalization layers of backbone?')
parser.add_argument('--poly', type=str2bool, default=True,
help='Use poly learning rate')
parser.add_argument('--overfit', type=str2bool, default=False,
help='overfit to small subset of data for debugging purposes')
# Squeeze and Excitation Parameters
parser.add_argument('--seenc', type=str2bool, default=True,
help='Squeeze and excitation per task on encoder?')
parser.add_argument('--sedec', type=str2bool, default=True,
help='Squeeze and excitation per task on decoder?')
parser.add_argument('--lr_tsk', type=float, default=-1,
help='Task Specific layer learning rate multiplier')
# Discriminator parameters
parser.add_argument('--dscr', type=str, default='None',
help='Use discriminator?')
parser.add_argument('--lr_dscr', type=int, default=1,
help='learning rate multiplier of discriminator?')
parser.add_argument('--dscr_w', type=float, default=0,
help='weight of discriminator in the range [0, 1]')
parser.add_argument('--dscrd', type=int, default=2,
help='Depth of discriminator')
parser.add_argument('--dscrk', type=int, default=1,
help='kernel size of discriminator')
# Task-specific parameters
parser.add_argument('--edge_w', type=float, default=0.95,
help='weighting the positive loss for boundary detection as w * L_pos + (1 - w) * L_neg')
return parser.parse_args()
def create_config():
cfg = edict()
args = parse_args()
# Parse tasks
assert (len(args.active_tasks) == 5)
args.do_edge = args.active_tasks[0]
args.do_semseg = args.active_tasks[1]
args.do_human_parts = args.active_tasks[2]
args.do_normals = args.active_tasks[3]
args.do_sal = args.active_tasks[4]
print('\nThis script was run with the following parameters:')
for x in vars(args):
print('{}: {}'.format(x, str(getattr(args, x))))
cfg.resume_epoch = args.resume_epoch
cfg.DO_EDGE = args.do_edge
cfg.DO_SEMSEG = args.do_semseg
cfg.DO_HUMAN_PARTS = args.do_human_parts
cfg.DO_NORMALS = args.do_normals
cfg.DO_SAL = args.do_sal
if not cfg.DO_EDGE and not cfg.DO_SEMSEG and not cfg.DO_HUMAN_PARTS and not cfg.DO_NORMALS and not cfg.DO_SAL:
raise ValueError("Select at least one task")
cfg['arch'] = args.arch
cfg['pretr'] = args.pretr
cfg['trBatch'] = args.trBatch
cfg['lr'] = args.lr
cfg['lr_dec'] = args.lr_dec
cfg['wd'] = args.wd
cfg['epochs'] = args.epochs
cfg['stride'] = args.stride
cfg['trNorm'] = args.trNorm
cfg['poly'] = args.poly
# Set squeeze and excitation parameters
cfg['seenc'] = args.seenc
cfg['sedec'] = args.sedec
if args.dscr == 'None':
args.dscr = None
cfg['dscr_type'] = args.dscr
cfg['lr_dscr'] = args.lr_dscr
cfg['dscr_w'] = args.dscr_w
cfg['dscrd'] = args.dscrd
cfg['dscrk'] = args.dscrk
task_args, name_args = get_exp_name(args)
cfg['exp_folder_name'] = 'pascal_mnet'
cfg['exp_name'] = "_".join(name_args)
cfg['tasks_name'] = "_".join(task_args)
cfg['save_dir_root'] = os.path.join(Path.exp_dir(), cfg['exp_folder_name'], cfg['tasks_name'])
cfg['train_db_name'] = ['PASCALContext', ]
cfg['test_db_name'] = 'PASCALContext'
cfg['infer_db_names'] = ['PASCALContext', ]
# Which tasks?
cfg.TASKS = edict()
cfg.TASKS.NAMES = []
cfg.TASKS.NUM_OUTPUT = {} # How many outputs per task?
cfg.TASKS.TB_MIN = {}
cfg.TASKS.TB_MAX = {}
cfg.TASKS.LOSS_MULT = {}
cfg.TASKS.FLAGVALS = {'image': cv2.INTER_CUBIC}
cfg.TASKS.INFER_FLAGVALS = {}
if cfg.DO_EDGE:
# Edge Detection
print('Adding task: Edge Detection')
tmp = 'edge'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 1
cfg.TASKS.TB_MIN[tmp] = 0
cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp]
cfg.TASKS.LOSS_MULT[tmp] = 50
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR
# Add task-specific parameters from parser
cfg['edge_w'] = args.edge_w
cfg['eval_edge'] = False
if cfg.DO_SEMSEG:
# Semantic Segmentation
print('Adding task: Semantic Segmentation')
tmp = 'semseg'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 21
cfg.TASKS.TB_MIN[tmp] = 0
cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp] - 1
cfg.TASKS.LOSS_MULT[tmp] = 1
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_NEAREST
if cfg.DO_HUMAN_PARTS:
# Human Parts Segmentation
print('Adding task: Human Part Segmentation')
tmp = 'human_parts'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 7
cfg.TASKS.TB_MIN[tmp] = 0
cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp] - 1
cfg.TASKS.LOSS_MULT[tmp] = 2
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_NEAREST
if cfg.DO_NORMALS:
        # Surface Normals Estimation
print('Adding task: Normals')
tmp = 'normals'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 3
cfg.TASKS.TB_MIN[tmp] = -1
cfg.TASKS.TB_MAX[tmp] = 1
cfg.TASKS.LOSS_MULT[tmp] = 10
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_CUBIC
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR
cfg['normloss'] = 1 # Hard-coded L1 loss for normals
if cfg.DO_SAL:
# Saliency Estimation
print('Adding task: Saliency')
tmp = 'sal'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 1
cfg.TASKS.TB_MIN[tmp] = 0
cfg.TASKS.TB_MAX[tmp] = 1
cfg.TASKS.LOSS_MULT[tmp] = 5
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR
cfg['lr_tsk'] = len(cfg.TASKS.NAMES) if args.lr_tsk < 0 else args.lr_tsk
cfg.NETWORK = edict()
# Visualize the network on Tensorboard / pdf?
cfg.NETWORK.VIS_NET = False
cfg.TRAIN = edict()
cfg.TRAIN.SCALE = (512, 512)
cfg.TRAIN.MOMENTUM = 0.9
cfg.TRAIN.TENS_VIS = False
cfg.TRAIN.TENS_VIS_INTER = 1000
cfg.TRAIN.TEMP_LOSS_INTER = 1000
cfg.TEST = edict()
# See evolution of the test set when training?
cfg.TEST.USE_TEST = True
cfg.TEST.TEST_INTER = 10
cfg.TEST.SCALE = (512, 512)
cfg.SEED = 0
cfg.EVALUATE = True
cfg.DEBUG = False
cfg['overfit'] = args.overfit
if cfg['overfit']:
cfg['save_dir_root'] = os.path.join(Path.exp_dir(), cfg['exp_folder_name'])
cfg['exp_name'] = 'test'
cfg['save_dir'] = os.path.join(cfg['save_dir_root'], cfg['exp_name'])
return cfg
def check_downloaded(p):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> %s %.1f%%' %
(_fpath, float(count * block_size) /
float(total_size) * 100.0))
sys.stdout.flush()
def _create_url(name):
return 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL//models/astmt/{}.tgz'.format(name)
_model_urls = {
'pascal_mnet_edge_semseg_human_parts_normals_sal_'
'arch-mnetv2_pretr-imagenet_trBatch-16_lr-0.001_epochs-130_trNorm_poly_seenc_sedec_edge_w-0.95_130'
}
ans = False
_check = p['exp_folder_name'] + '_' + p['tasks_name'] + '_' + p['exp_name'] + '_' + str(p['resume_epoch'])
_fpath = os.path.join(Path.exp_dir(), _check + '.tgz')
if _check in _model_urls:
if not os.path.isfile(os.path.join(p['save_dir'], 'models',
'model_epoch-' + str(p['resume_epoch'] - 1) + '.pth')):
urllib.request.urlretrieve(_create_url(_check), _fpath, _progress)
# extract file
cwd = os.getcwd()
print('\nExtracting tar file')
tar = tarfile.open(_fpath)
os.chdir(Path.exp_dir())
tar.extractall()
tar.close()
os.chdir(cwd)
print('Done!')
ans = True
return ans
def get_net_mnet(p):
"""
    Define the network (DeepLab with MobileNet-V2 backbone) and the trainable parameters
"""
print('Creating DeepLab with Mobilenet-V2 model: {}'.format(p.NETWORK))
network = se_mobilenet_v2.se_mobilenet_v2
net = network(n_classes=p.TASKS.NUM_OUTPUT,
pretrained=p['pretr'],
tasks=p.TASKS.NAMES,
output_stride=p['stride'],
train_norm_layers=p['trNorm'],
mod_enc=p['seenc'],
mod_dec=p['sedec'],
use_dscr=(p['dscr_type'] == 'fconv'),
dscr_k=p['dscrk'],
dscr_d=p['dscrd'])
if p['resume_epoch'] != 0:
print("Initializing weights from: {}".format(
os.path.join(p['save_dir'], 'models', 'model_epoch-' + str(p['resume_epoch'] - 1) + '.pth')))
state_dict_checkpoint = torch.load(
os.path.join(p['save_dir'], 'models', 'model_epoch-' + str(p['resume_epoch'] - 1) + '.pth')
, map_location=lambda storage, loc: storage)
net.load_state_dict(state_dict_checkpoint)
return net
def get_train_params(net, p, lr):
print('Adjusting learning rate')
print('Base lr: {}'.format(lr))
print('Decoder lr: {}'.format(lr * p['lr_dec']))
print('Task-specific lr: {}'.format(lr * p['lr_tsk']))
train_params = [{'params': se_mobilenet_v2.get_lr_params(net, part='backbone', tasks=p.TASKS.NAMES),
'lr': lr},
{'params': se_mobilenet_v2.get_lr_params(net, part='decoder', tasks=p.TASKS.NAMES),
'lr': lr * p['lr_dec']},
{'params': se_mobilenet_v2.get_lr_params(net, part='task_specific', tasks=p.TASKS.NAMES),
'lr': lr * p['lr_tsk']}]
if p['dscr_type'] is not None:
print('Discriminator lr: {}'.format(lr * p['lr_dscr']))
train_params.append(
{'params': se_mobilenet_v2.get_lr_params(net, part='discriminator', tasks=p.TASKS.NAMES),
'lr': lr * p['lr_dscr']})
return train_params
def get_exp_name(args):
"""
    Creates the experiment name from the configuration and the command-line arguments
"""
task_dict = {
'do_edge': 0,
'do_semseg': 0,
'do_human_parts': 0,
'do_normals': 0,
'do_sal': 0
}
name_dict = {
'arch': None,
'pretr': None,
'trBatch': None,
'lr': None,
'wd': 1e-04,
'epochs': None,
'stride': 16,
'trNorm': False,
'poly': False,
'seenc': False,
'sedec': False,
'dscr': None,
'lr_dscr': 1,
'dscr_w': ('dscr', None),
'dscrd': ('dscr', None),
'dscrk': ('dscr', None),
'edge_w': ('do_edge', None),
'lr_dec': 1,
'lr_tsk': -1,
}
# Experiment folder (task) string
task_args = [x.replace('do_', '') for x in task_dict if getattr(args, x) != task_dict[x]]
# Experiment name string
name_args = []
for x in name_dict:
# Check dependencies in tuple
if isinstance(name_dict[x], tuple):
elem = name_dict if name_dict[x][0] in name_dict else task_dict
if elem[name_dict[x][0]] == getattr(args, name_dict[x][0]):
continue
if getattr(args, x) != name_dict[x]:
tmp = getattr(args, x)
if isinstance(tmp, list):
tmp = "_".join([str(x) for x in tmp])
else:
tmp = str(tmp)
name_args.append(x + '-' + tmp)
name_args = [x.replace('-True', '') for x in name_args]
return task_args, name_args
| astmt-master | experiments/dense_predict/pascal_mnet/config.py |
MAX_N_IMAGES_PER_GPU = {
'mnetv2-8': 10,
'mnetv2-16': 16,
}
| astmt-master | experiments/dense_predict/pascal_mnet/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import socket
import timeit
import cv2
from datetime import datetime
import imageio
import numpy as np
# PyTorch includes
import torch
import torch.optim as optim
from torch.nn.functional import interpolate
# Custom includes
from fblib.util.helpers import generate_param_report
from fblib.util.dense_predict.utils import lr_poly
from experiments.dense_predict import common_configs
from fblib.util.mtl_tools.multitask_visualizer import TBVisualizer
from fblib.util.model_resources.flops import compute_gflops
from fblib.util.model_resources.num_parameters import count_parameters
from fblib.util.dense_predict.utils import AverageMeter
# Custom optimizer
from fblib.util.optimizer_mtl.select_used_modules import make_closure
# Configuration
from experiments.dense_predict.pascal_mnet import config
# Tensorboard include
from tensorboardX import SummaryWriter
def main():
p = config.create_config()
gpu_id = 0
device = torch.device("cuda:" + str(gpu_id) if torch.cuda.is_available() else "cpu")
p.TEST.BATCH_SIZE = 32
# Setting parameters
n_epochs = p['epochs']
print("Total training epochs: {}".format(n_epochs))
print(p)
print('Training on {}'.format(p['train_db_name']))
snapshot = 10 # Store a model every snapshot epochs
test_interval = p.TEST.TEST_INTER # Run on test set every test_interval epochs
torch.manual_seed(p.SEED)
exp_name = os.path.dirname(os.path.abspath(__file__)).split('/')[-1]
if not os.path.exists(os.path.join(p['save_dir'], 'models')):
if p['resume_epoch'] == 0:
os.makedirs(os.path.join(p['save_dir'], 'models'))
else:
if not config.check_downloaded(p):
print('Folder does not exist. No checkpoint to resume from. Exiting')
exit(1)
net = config.get_net_mnet(p)
gflops = compute_gflops(net, in_shape=(p['trBatch'], 3, p.TRAIN.SCALE[0], p.TRAIN.SCALE[1]),
tasks=p.TASKS.NAMES[0])
print('GFLOPS per task: {}'.format(gflops / p['trBatch']))
print('\nNumber of parameters (in millions): {0:.3f}'.format(count_parameters(net) / 1e6))
print('Number of parameters (in millions) for decoder: {0:.3f}\n'.format(count_parameters(net.decoder) / 1e6))
net.to(device)
if p['resume_epoch'] != n_epochs:
criteria_tr = {}
criteria_ts = {}
running_loss_tr = {task: 0. for task in p.TASKS.NAMES}
running_loss_ts = {task: 0. for task in p.TASKS.NAMES}
curr_loss_task = {task: 0. for task in p.TASKS.NAMES}
counter_tr = {task: 0 for task in p.TASKS.NAMES}
counter_ts = {task: 0 for task in p.TASKS.NAMES}
# Discriminator loss variables for logging
running_loss_tr_dscr = 0
running_loss_ts_dscr = 0
# Logging into Tensorboard
log_dir = os.path.join(p['save_dir'], 'models',
datetime.now().strftime('%b%d_%H-%M-%S') + '_' + socket.gethostname())
writer = SummaryWriter(log_dir=log_dir)
# Training parameters and their optimizer
train_params = config.get_train_params(net, p, p['lr'])
optimizer = optim.SGD(train_params, lr=p['lr'], momentum=p.TRAIN.MOMENTUM, weight_decay=p['wd'])
for task in p.TASKS.NAMES:
# Losses
criteria_tr[task] = config.get_loss(p, task)
criteria_ts[task] = config.get_loss(p, task)
criteria_tr[task].to(device)
criteria_ts[task].to(device)
# Preparation of the data loaders
transforms_tr, transforms_ts, _ = config.get_transformations(p)
trainloader = config.get_train_loader(p, db_name=p['train_db_name'], transforms=transforms_tr)
testloader = config.get_test_loader(p, db_name=p['test_db_name'], transforms=transforms_ts)
# TensorBoard Image Visualizer
tb_vizualizer = TBVisualizer(tasks=p.TASKS.NAMES, min_ranges=p.TASKS.TB_MIN, max_ranges=p.TASKS.TB_MAX,
batch_size=p['trBatch'])
generate_param_report(os.path.join(p['save_dir'], exp_name + '.txt'), p)
# Train variables
num_img_tr = len(trainloader)
num_img_ts = len(testloader)
print("Training Network")
# Main Training and Testing Loop
for epoch in range(p['resume_epoch'], n_epochs):
top1_dscr = AverageMeter()
start_time = timeit.default_timer()
# One training epoch
net.train()
alpha = 2. / (1. + np.exp(-10 * ((epoch + 1) / n_epochs))) - 1 # Ganin et al for gradient reversal
if p['dscr_type'] is not None:
print('Value of alpha: {}'.format(alpha))
for ii, sample in enumerate(trainloader):
curr_loss_dscr = 0
# Grab the input
inputs = sample['image']
inputs.requires_grad_()
inputs = inputs.to(device)
task_gts = list(sample.keys())
tasks = net.tasks
gt_elems = {x: sample[x].to(device, non_blocking=True) for x in tasks}
uniq = {x: gt_elems[x].unique() for x in gt_elems}
outputs = {}
for task in tasks:
if task not in task_gts:
continue
if len(uniq[task]) == 1 and uniq[task][0] == 255:
continue
# Forward pass
output = {}
features = {}
output[task], features[task] = net.forward(inputs, task=task)
losses_tasks, losses_dscr, outputs_dscr, grads, task_labels \
= net.compute_losses(output, features, criteria_tr, gt_elems, alpha, p)
loss_tasks = losses_tasks[task]
running_loss_tr[task] += losses_tasks[task].item()
curr_loss_task[task] = losses_tasks[task].item()
counter_tr[task] += 1
# Store output for logging
outputs[task] = output[task].detach()
if p['dscr_type'] is not None:
# measure loss, accuracy and record accuracy for discriminator
loss_dscr = losses_dscr[task]
running_loss_tr_dscr += losses_dscr[task].item()
curr_loss_dscr += loss_dscr.item()
prec1 = common_configs.accuracy(outputs_dscr[task].data, task_labels[task], topk=(1,))
if prec1 != -1:
top1_dscr.update(prec1[0].item(), task_labels[task].size(0))
loss = (1 - p['dscr_w']) * loss_tasks + p['dscr_w'] * loss_dscr
else:
loss = loss_tasks
# Backward pass inside make_closure to update only weights that were used during fw pass
optimizer.zero_grad()
optimizer.step(closure=make_closure(loss=loss, net=net))
# Print stuff and log epoch loss into Tensorboard
if ii % num_img_tr == num_img_tr - 1:
print('[Epoch: %d, numImages: %5d]' % (epoch, ii + 1))
for task in p.TASKS.NAMES:
running_loss_tr[task] = running_loss_tr[task] / counter_tr[task]
writer.add_scalar('data/total_loss_epoch' + task,
running_loss_tr[task] / p.TASKS.LOSS_MULT[task], epoch)
print('Loss %s: %f' % (task, running_loss_tr[task]))
running_loss_tr[task] = 0
counter_tr[task] = 0
if p['dscr_type'] is not None:
running_loss_tr_dscr = running_loss_tr_dscr / num_img_tr / len(p.TASKS.NAMES)
writer.add_scalar('data/total_loss_epoch_dscr', running_loss_tr_dscr, epoch)
print('Loss Discriminator: %f' % running_loss_tr_dscr)
print('Train Accuracy Discriminator: Prec@1 {top1.avg:.3f} Error@1 {error1:.3f}'.format(
top1=top1_dscr, error1=100 - top1_dscr.avg))
writer.add_scalar('data/train_accuracy_dscr', top1_dscr.avg, epoch)
stop_time = timeit.default_timer()
print("Execution time: " + str(stop_time - start_time) + "\n")
# Log current train loss into Tensorboard
for task in p.TASKS.NAMES:
writer.add_scalar('data/train_loss_iter_' + task, curr_loss_task[task], ii + num_img_tr * epoch)
curr_loss_task[task] = 0.
if p['dscr_type'] is not None:
writer.add_scalar('data/train_loss_dscr_iter', curr_loss_dscr, ii + num_img_tr * epoch)
curr_loss_dscr = 0.
# Log train images to Tensorboard
if p.TRAIN.TENS_VIS and ii % p.TRAIN.TENS_VIS_INTER == 0:
curr_iter = ii + num_img_tr * epoch
tb_vizualizer.visualize_images_tb(writer, sample, outputs,
global_step=curr_iter, tag=ii, phase='train')
if p['poly'] and ii % num_img_tr == num_img_tr - 1:
lr_ = lr_poly(p['lr'], iter_=epoch, max_iter=n_epochs)
print('(poly lr policy) learning rate: {0:.6f}'.format(lr_))
train_params = config.get_train_params(net, p, lr_)
optimizer = optim.SGD(train_params, lr=lr_, momentum=p.TRAIN.MOMENTUM, weight_decay=p['wd'])
optimizer.zero_grad()
# Save the model
if (epoch % snapshot) == snapshot - 1 and epoch != 0:
torch.save(net.state_dict(), os.path.join(p['save_dir'], 'models',
'model_epoch-' + str(epoch) + '.pth'))
# One testing epoch
if p.TEST.USE_TEST and epoch % test_interval == (test_interval - 1):
print('Testing Phase')
top1_dscr = AverageMeter()
net.eval()
start_time = timeit.default_timer()
for ii, sample in enumerate(testloader):
inputs = sample['image'].to(device)
task_gts = list(sample.keys())
tasks = net.tasks
gt_elems = {x: sample[x].to(device, non_blocking=True) for x in tasks}
uniq = {x: gt_elems[x].unique() for x in gt_elems}
outputs = {}
for task in tasks:
if task not in task_gts:
continue
if len(uniq[task]) == 1 and uniq[task][0] == 255:
continue
# Forward pass of the mini-batch
output = {}
features = {}
output[task], features[task] = net.forward(inputs, task=task)
losses_tasks, losses_dscr, outputs_dscr, grads, task_labels \
= net.compute_losses(output, features, criteria_ts, gt_elems, alpha, p)
running_loss_ts[task] += losses_tasks[task].item()
counter_ts[task] += 1
# For logging
outputs[task] = output[task].detach()
if p['dscr_type'] is not None:
running_loss_ts_dscr += losses_dscr[task].item()
# measure accuracy and record loss for discriminator
prec1 = common_configs.accuracy(outputs_dscr[task].data, task_labels[task], topk=(1,))
if prec1 != -1:
top1_dscr.update(prec1[0].item(), task_labels[task].size(0))
# Print stuff
if ii % num_img_ts == num_img_ts - 1:
print('[Epoch: %d, numTestImages: %5d]' % (epoch, ii + 1))
for task in p.TASKS.NAMES:
running_loss_ts[task] = running_loss_ts[task] / counter_ts[task]
writer.add_scalar('data/test_loss_' + task + '_epoch',
running_loss_ts[task] / p.TASKS.LOSS_MULT[task], epoch)
print('Testing Loss %s: %f' % (task, running_loss_ts[task]))
running_loss_ts[task] = 0
counter_ts[task] = 0
# Free the graph
losses_tasks = {}
if p['dscr_type'] is not None:
running_loss_ts_dscr = running_loss_ts_dscr / num_img_ts / len(p.TASKS.NAMES)
writer.add_scalar('data/test_loss_dscr', running_loss_ts_dscr, epoch)
print('Loss Discriminator: %f' % running_loss_ts_dscr)
writer.add_scalar('data/test_accuracy_dscr', top1_dscr.avg, epoch)
print('Test Accuracy Discriminator: Prec@1 {top1.avg:.3f} Error@1 {error1:.3f}'.format(
top1=top1_dscr, error1=100 - top1_dscr.avg))
# Free the graph
losses_dscr = {}
stop_time = timeit.default_timer()
print("Execution time: " + str(stop_time - start_time) + "\n")
# Log test images to Tensorboard
if p.TRAIN.TENS_VIS and ii % p.TRAIN.TENS_VIS_INTER == 0:
curr_iter = ii + num_img_tr * epoch
tb_vizualizer.visualize_images_tb(writer, sample, outputs,
global_step=curr_iter, tag=ii, phase='test')
writer.close()
# Generate Results
net.eval()
_, _, transforms_infer = config.get_transformations(p)
for db_name in p['infer_db_names']:
testloader = config.get_test_loader(p, db_name=db_name, transforms=transforms_infer, infer=True)
save_dir_res = os.path.join(p['save_dir'], 'Results_' + db_name)
print('Testing network')
# Main Testing Loop
with torch.no_grad():
for ii, sample in enumerate(testloader):
img, meta = sample['image'], sample['meta']
# Forward pass of the mini-batch
inputs = img.to(device)
tasks = net.tasks
for task in tasks:
output, _ = net.forward(inputs, task=task)
save_dir_task = os.path.join(save_dir_res, task)
if not os.path.exists(save_dir_task):
os.makedirs(save_dir_task)
output = interpolate(output, size=(inputs.size()[-2], inputs.size()[-1]),
mode='bilinear', align_corners=False)
output = common_configs.get_output(output, task)
for jj in range(int(inputs.size()[0])):
if len(sample[task][jj].unique()) == 1 and sample[task][jj].unique() == 255:
continue
fname = meta['image'][jj]
result = cv2.resize(output[jj], dsize=(meta['im_size'][1][jj], meta['im_size'][0][jj]),
interpolation=p.TASKS.INFER_FLAGVALS[task])
imageio.imwrite(os.path.join(save_dir_task, fname + '.png'), result.astype(np.uint8))
if p.EVALUATE:
common_configs.eval_all_results(p)
if __name__ == '__main__':
main()
| astmt-master | experiments/dense_predict/pascal_mnet/main.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import sys
import cv2
import argparse
import torch
import tarfile
from six.moves import urllib
from easydict import EasyDict as edict
# Networks
import fblib.networks.deeplab_multi_task.deeplab_se_resnet_multitask as se_resnet_multitask
# Common configs
from experiments.dense_predict.common_configs import get_loss, get_train_loader, get_test_loader, get_transformations
from fblib.util.mypath import Path
def parse_args():
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
parser = argparse.ArgumentParser(description='Multi-task Learning for NYUD')
# Select tasks
parser.add_argument('--active_tasks', type=int, nargs='+', default=[1, 1, 1, 1],
help='Which tasks to train?')
# General parameters
parser.add_argument('--arch', type=str, default='se_res50',
help='network: se_res26, se_res50, se_res101')
parser.add_argument('--pretr', type=str, default='imagenet',
help='pre-trained model: "imagenet" or "scratch"')
parser.add_argument('--trBatch', type=int, default=8,
help='training batch size')
parser.add_argument('-lr', type=float, default=0.001,
help='initial learning rate. poly-learning rate is used.')
parser.add_argument('--lr_dec', type=float, default=1,
help='decoder learning rate multiplier')
parser.add_argument('-wd', type=float, default=1e-4,
help='Weight decay')
parser.add_argument('--epochs', type=int, default=200,
help='Total number of epochs for training')
parser.add_argument('--resume_epoch', type=int, default=0,
help='Resume Epoch #')
parser.add_argument('--cls', type=str, default='atrous-v3',
help='Classifier type')
parser.add_argument('--stride', type=int, default=16,
help='Output stride of ResNet backbone. If set to 16 saves significant memory')
parser.add_argument('--trNorm', type=str2bool, default=True,
help='train normalization layers of Backbone?')
parser.add_argument('--dec_w', type=int, default=64,
help='decoder width (default 256 in Deeplab v3+)')
parser.add_argument('--overfit', type=str2bool, default=False,
help='overfit to small subset of data for debugging purposes')
# Modulation Parameters
parser.add_argument('--seenc', type=str2bool, default=True,
help='Squeeze and excitation per task for encoder? False will still use 1 SE for all tasks')
parser.add_argument('--sedec', type=str2bool, default=True,
help='Squeeze and excitation per task for decoder? False will not use SE modules')
parser.add_argument('--adapt', type=str2bool, default=True,
help='Use parallel residual adapters?')
parser.add_argument('--lr_tsk', type=float, default=-1,
help='Task Specific layer learning rate multiplier')
# Discriminator parameters
parser.add_argument('--dscr', type=str, default='fconv',
help='Use discriminator?')
parser.add_argument('--lr_dscr', type=int, default=10,
help='learning rate multiplier of discriminator?')
parser.add_argument('--dscr_w', type=float, default=0.01,
help='weight of discriminator in the range [0, 1]')
parser.add_argument('--dscrd', type=int, default=2,
help='Depth of discriminator')
parser.add_argument('--dscrk', type=int, default=3,
help='Kernel size of discriminator')
# Task-specific parameters
parser.add_argument('--edge_w', type=float, default=0.8,
help='Weighting the positive loss for boundary detection as w * L_pos + (1 - w) * L_neg')
return parser.parse_args()
def create_config():
cfg = edict()
args = parse_args()
# Parse tasks
assert (len(args.active_tasks) == 4)
args.do_edge = args.active_tasks[0]
args.do_semseg = args.active_tasks[1]
args.do_normals = args.active_tasks[2]
args.do_depth = args.active_tasks[3]
print('\nThis script was run with the following parameters:')
for x in vars(args):
print('{}: {}'.format(x, str(getattr(args, x))))
cfg.resume_epoch = args.resume_epoch
cfg.DO_EDGE = args.do_edge
cfg.DO_SEMSEG = args.do_semseg
cfg.DO_NORMALS = args.do_normals
cfg.DO_DEPTH = args.do_depth
if not cfg.DO_EDGE and not cfg.DO_SEMSEG and not cfg.DO_NORMALS and not cfg.DO_DEPTH:
raise ValueError("Select at least one task")
cfg['arch'] = args.arch
cfg['pretr'] = args.pretr
cfg['trBatch'] = args.trBatch
cfg['lr'] = args.lr
cfg['lr_dec'] = args.lr_dec
cfg['wd'] = args.wd
cfg['cls'] = args.cls
cfg['epochs'] = args.epochs
cfg['stride'] = args.stride
cfg['trNorm'] = args.trNorm
cfg['dec_w'] = args.dec_w
# Set Modulation (Squeeze and Exciation, Residual Adapters) parameters
cfg['seenc'] = args.seenc
cfg['sedec'] = args.sedec
cfg['adapters'] = args.adapt
if cfg['sedec']:
cfg['norm_per_task'] = True
else:
cfg['norm_per_task'] = False
if args.dscr == 'None':
args.dscr = None
cfg['dscr_type'] = args.dscr
cfg['lr_dscr'] = args.lr_dscr
cfg['dscr_w'] = args.dscr_w
cfg['dscrd'] = args.dscrd
cfg['dscrk'] = args.dscrk
task_args, name_args = get_exp_name(args)
cfg['exp_folder_name'] = 'nyud_resnet'
cfg['exp_name'] = "_".join(name_args)
cfg['tasks_name'] = "_".join(task_args)
cfg['save_dir_root'] = os.path.join(Path.exp_dir(), cfg['exp_folder_name'], cfg['tasks_name'])
cfg['train_db_name'] = ['NYUD', ]
cfg['test_db_name'] = 'NYUD'
cfg['infer_db_names'] = ['NYUD', ]
# Which tasks?
cfg.TASKS = edict()
cfg.TASKS.NAMES = []
cfg.TASKS.NUM_OUTPUT = {} # How many outputs per task?
cfg.TASKS.TB_MIN = {}
cfg.TASKS.TB_MAX = {}
cfg.TASKS.LOSS_MULT = {}
cfg.TASKS.FLAGVALS = {'image': cv2.INTER_CUBIC}
cfg.TASKS.INFER_FLAGVALS = {}
if cfg.DO_EDGE:
# Edge Detection
print('Adding task: Edge Detection')
tmp = 'edge'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 1
cfg.TASKS.TB_MIN[tmp] = 0
cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp]
cfg.TASKS.LOSS_MULT[tmp] = 50
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR
# Add task-specific parameters from parser
cfg['edge_w'] = args.edge_w
cfg['eval_edge'] = False
if cfg.DO_SEMSEG:
# Semantic Segmentation
print('Adding task: Semantic Segmentation')
tmp = 'semseg'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 41
cfg.TASKS.TB_MIN[tmp] = 0
cfg.TASKS.TB_MAX[tmp] = cfg.TASKS.NUM_OUTPUT[tmp] - 1
cfg.TASKS.LOSS_MULT[tmp] = 1
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_NEAREST
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_NEAREST
if cfg.DO_NORMALS:
# Surface Normals Estimation
print('Adding task: Normals')
tmp = 'normals'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 3
cfg.TASKS.TB_MIN[tmp] = -1
cfg.TASKS.TB_MAX[tmp] = 1
cfg.TASKS.LOSS_MULT[tmp] = 10
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_CUBIC
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR
cfg['normloss'] = 1 # Hard-coded L1 loss for normals
if cfg.DO_DEPTH:
# Depth Estimation
print('Adding task: Depth')
tmp = 'depth'
cfg.TASKS.NAMES.append(tmp)
cfg.TASKS.NUM_OUTPUT[tmp] = 1
cfg.TASKS.TB_MIN[tmp] = 1
cfg.TASKS.TB_MAX[tmp] = 10
cfg.TASKS.LOSS_MULT[tmp] = 1
cfg.TASKS.FLAGVALS[tmp] = cv2.INTER_LINEAR
cfg.TASKS.INFER_FLAGVALS[tmp] = cv2.INTER_LINEAR
cfg['lr_tsk'] = len(cfg.TASKS.NAMES) if args.lr_tsk < 0 else args.lr_tsk
cfg.NETWORK = edict()
# Visualize the network on Tensorboard / pdf?
cfg.NETWORK.VIS_NET = False
cfg.TRAIN = edict()
cfg.TRAIN.SCALE = (512, 512)
cfg.TRAIN.MOMENTUM = 0.9
cfg.TRAIN.TENS_VIS = True
cfg.TRAIN.TENS_VIS_INTER = 1000
cfg.TRAIN.TEMP_LOSS_INTER = 1000
cfg.TEST = edict()
# See evolution of the test set when training?
cfg.TEST.USE_TEST = True
cfg.TEST.TEST_INTER = 10
cfg.TEST.SCALE = (512, 512)
cfg.SEED = 0
cfg.EVALUATE = True
cfg.DEBUG = False
cfg['overfit'] = args.overfit
if cfg['overfit']:
cfg['save_dir_root'] = os.path.join(Path.exp_dir(), cfg['exp_folder_name'])
cfg['exp_name'] = 'test'
cfg['save_dir'] = os.path.join(cfg['save_dir_root'], cfg['exp_name'])
return cfg
def check_downloaded(p):
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> %s %.1f%%' %
(_fpath, float(count * block_size) /
float(total_size) * 100.0))
sys.stdout.flush()
def _create_url(name):
return 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL//models/astmt/{}.tgz'.format(name)
_model_urls = {
'nyud_resnet_edge_semseg_normals_depth_'
'arch-se_res50_pretr-imagenet_trBatch-8_lr-0.001_epochs-200_trNorm_seenc_sedec_adapt_dscr-fconv_lr_dscr'
'-10_dscr_w-0.01_dscrd-2_dscrk-3_edge_w-0.8_200',
}
ans = False
_check = p['exp_folder_name'] + '_' + p['tasks_name'] + '_' + p['exp_name'] + '_' + str(p['resume_epoch'])
_fpath = os.path.join(Path.exp_dir(), _check + '.tgz')
if _check in _model_urls:
if not os.path.isfile(os.path.join(p['save_dir'], 'models',
'model_epoch-' + str(p['resume_epoch'] - 1) + '.pth')):
urllib.request.urlretrieve(_create_url(_check), _fpath, _progress)
# extract file
cwd = os.getcwd()
print('\nExtracting tar file')
tar = tarfile.open(_fpath)
os.chdir(Path.exp_dir())
tar.extractall()
tar.close()
os.chdir(cwd)
print('Done!')
ans = True
return ans
def get_net_resnet(p):
"""
Define the network (standard Deeplab ResNet101) and the trainable parameters
"""
if p['arch'] == 'se_res26':
network = se_resnet_multitask.se_resnet26
elif p['arch'] == 'se_res50':
network = se_resnet_multitask.se_resnet50
elif p['arch'] == 'se_res101':
network = se_resnet_multitask.se_resnet101
else:
raise NotImplementedError('ResNet: Choose among se_res26, se_res50, and se_res101')
print('Creating ResNet model: {}'.format(p.NETWORK))
net = network(tasks=p.TASKS.NAMES, n_classes=p.TASKS.NUM_OUTPUT, pretrained=p['pretr'], classifier=p['cls'],
output_stride=p['stride'], train_norm_layers=p['trNorm'], width_decoder=p['dec_w'],
squeeze_enc=p['seenc'], squeeze_dec=p['sedec'], adapters=p['adapters'],
norm_per_task=p['norm_per_task'], dscr_type=p['dscr_type'], dscr_d=p['dscrd'], dscr_k=p['dscrk'])
if p['resume_epoch'] != 0:
print("Initializing weights from: {}".format(
os.path.join(p['save_dir'], 'models', 'model_epoch-' + str(p['resume_epoch'] - 1) + '.pth')))
state_dict_checkpoint = torch.load(
os.path.join(p['save_dir'], 'models', 'model_epoch-' + str(p['resume_epoch'] - 1) + '.pth')
, map_location=lambda storage, loc: storage)
net.load_state_dict(state_dict_checkpoint)
return net
def get_train_params(net, p, lr):
# lr is passed explicitly so the poly schedule in main.py can rebuild the
# groups with a decayed rate (per-group 'lr' overrides the optimizer-level lr)
train_params = [{'params': se_resnet_multitask.get_lr_params(net, part='backbone', tasks=p.TASKS.NAMES),
'lr': lr},
{'params': se_resnet_multitask.get_lr_params(net, part='decoder', tasks=p.TASKS.NAMES),
'lr': lr * p['lr_dec']},
{'params': se_resnet_multitask.get_lr_params(net, part='task_specific', tasks=p.TASKS.NAMES),
'lr': lr * p['lr_tsk']}]
if p['dscr_type'] is not None:
train_params.append(
{'params': se_resnet_multitask.get_lr_params(net, part='discriminator', tasks=p.TASKS.NAMES),
'lr': lr * p['lr_dscr']})
return train_params
def get_exp_name(args):
"""
Creates the experiment name from the configuration file and the arguments
"""
task_dict = {
'do_edge': 0,
'do_semseg': 0,
'do_normals': 0,
'do_depth': 0
}
name_dict = {
'arch': None,
'pretr': None,
'trBatch': None,
'lr': None,
'wd': 1e-04,
'epochs': None,
'cls': 'atrous-v3',
'stride': 16,
'trNorm': False,
'dec_w': 64,
'seenc': False,
'sedec': False,
'adapt': False,
'dscr': None,
'lr_dscr': 1,
'dscr_w': ('dscr', None),
'dscrd': ('dscr', None),
'dscrk': ('dscr', None),
'edge_w': ('do_edge', None),
'lr_dec': 1,
'lr_tsk': -1,
}
# Experiment folder (task) string
task_args = [x.replace('do_', '') for x in task_dict if getattr(args, x) != task_dict[x]]
# Experiment name string
name_args = []
for x in name_dict:
# Check dependencies in tuple
if isinstance(name_dict[x], tuple):
elem = name_dict if name_dict[x][0] in name_dict else task_dict
if elem[name_dict[x][0]] == getattr(args, name_dict[x][0]):
continue
if getattr(args, x) != name_dict[x]:
tmp = getattr(args, x)
if isinstance(tmp, list):
tmp = "_".join([str(x) for x in tmp])
else:
tmp = str(tmp)
name_args.append(x + '-' + tmp)
name_args = [x.replace('-True', '') for x in name_args]
return task_args, name_args
| astmt-master | experiments/dense_predict/nyud_resnet/config.py |
MAX_N_IMAGES_PER_GPU = {
'se_res26-8': 10,
'se_res26-16': 16,
'se_res50-8': 8,
'se_res50-16': 16,
'se_res101-8': 2,
'se_res101-16': 10,
}
| astmt-master | experiments/dense_predict/nyud_resnet/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import socket
import timeit
import cv2
from datetime import datetime
import imageio
import scipy.io as sio
import numpy as np
# PyTorch includes
import torch
import torch.optim as optim
from torch.nn.functional import interpolate
# Custom includes
from fblib.util.helpers import generate_param_report
from fblib.util.dense_predict.utils import lr_poly
from experiments.dense_predict import common_configs
from fblib.util.mtl_tools.multitask_visualizer import TBVisualizer, visualize_network
from fblib.util.model_resources.flops import compute_gflops
from fblib.util.model_resources.num_parameters import count_parameters
from fblib.util.dense_predict.utils import AverageMeter
# Custom optimizer
from fblib.util.optimizer_mtl.select_used_modules import make_closure
# Configuration file
from experiments.dense_predict.nyud_resnet import config as config
# Tensorboard include
from tensorboardX import SummaryWriter
def main():
p = config.create_config()
gpu_id = 0
device = torch.device("cuda:" + str(gpu_id) if torch.cuda.is_available() else "cpu")
p.TEST.BATCH_SIZE = 32
# Setting parameters
n_epochs = p['epochs']
print("Total training epochs: {}".format(n_epochs))
print(p)
print('Training on {}'.format(p['train_db_name']))
snapshot = 10 # Store a model every snapshot epochs
test_interval = p.TEST.TEST_INTER # Run on test set every test_interval epochs
torch.manual_seed(p.SEED)
exp_name = os.path.dirname(os.path.abspath(__file__)).split('/')[-1]
if not os.path.exists(os.path.join(p['save_dir'], 'models')):
if p['resume_epoch'] == 0:
os.makedirs(os.path.join(p['save_dir'], 'models'))
else:
if not config.check_downloaded(p):
print('Folder does not exist. No checkpoint to resume from. Exiting')
exit(1)
net = config.get_net_resnet(p)
# Visualize the network
if p.NETWORK.VIS_NET:
visualize_network(net, p)
gflops = compute_gflops(net, in_shape=(p['trBatch'], 3, p.TRAIN.SCALE[0], p.TRAIN.SCALE[1]),
tasks=p.TASKS.NAMES[0])
print('GFLOPS per task: {}'.format(gflops / p['trBatch']))
print('\nNumber of parameters (in millions): {0:.3f}'.format(count_parameters(net) / 1e6))
print('Number of parameters (in millions) for decoder: {0:.3f}\n'.format(count_parameters(net.decoder) / 1e6))
net.to(device)
if p['resume_epoch'] != n_epochs:
criteria_tr = {}
criteria_ts = {}
running_loss_tr = {task: 0. for task in p.TASKS.NAMES}
running_loss_ts = {task: 0. for task in p.TASKS.NAMES}
curr_loss_task = {task: 0. for task in p.TASKS.NAMES}
counter_tr = {task: 0 for task in p.TASKS.NAMES}
counter_ts = {task: 0 for task in p.TASKS.NAMES}
# Discriminator loss variables for logging
running_loss_tr_dscr = 0
running_loss_ts_dscr = 0
# Logging into Tensorboard
log_dir = os.path.join(p['save_dir'], 'models',
datetime.now().strftime('%b%d_%H-%M-%S') + '_' + socket.gethostname())
writer = SummaryWriter(log_dir=log_dir)
# Training parameters and their optimizer
train_params = config.get_train_params(net, p, p['lr'])
optimizer = optim.SGD(train_params, lr=p['lr'], momentum=p.TRAIN.MOMENTUM, weight_decay=p['wd'])
for task in p.TASKS.NAMES:
# Losses
criteria_tr[task] = config.get_loss(p, task)
criteria_ts[task] = config.get_loss(p, task)
criteria_tr[task].to(device)
criteria_ts[task].to(device)
# Preparation of the data loaders
transforms_tr, transforms_ts, _ = config.get_transformations(p)
trainloader = config.get_train_loader(p, db_name=p['train_db_name'], transforms=transforms_tr)
testloader = config.get_test_loader(p, db_name=p['test_db_name'], transforms=transforms_ts)
# TensorBoard Image Visualizer
tb_vizualizer = TBVisualizer(tasks=p.TASKS.NAMES, min_ranges=p.TASKS.TB_MIN, max_ranges=p.TASKS.TB_MAX,
batch_size=p['trBatch'])
generate_param_report(os.path.join(p['save_dir'], exp_name + '.txt'), p)
# Train variables
num_img_tr = len(trainloader)
num_img_ts = len(testloader)
print("Training Network")
# Main Training and Testing Loop
for epoch in range(p['resume_epoch'], n_epochs):
top1_dscr = AverageMeter()
start_time = timeit.default_timer()
# One training epoch
net.train()
alpha = 2. / (1. + np.exp(-10 * ((epoch + 1) / n_epochs))) - 1 # Ganin et al for gradient reversal
if p['dscr_type'] is not None:
print('Value of alpha: {}'.format(alpha))
for ii, sample in enumerate(trainloader):
curr_loss_dscr = 0
# Grab the input
inputs = sample['image']
inputs.requires_grad_()
inputs = inputs.to(device)
task_gts = list(sample.keys())
tasks = net.tasks
gt_elems = {x: sample[x].to(device, non_blocking=True) for x in tasks}
uniq = {x: gt_elems[x].unique() for x in gt_elems}
outputs = {}
for task in tasks:
if task not in task_gts:
continue
if len(uniq[task]) == 1 and uniq[task][0] == 255:
continue
# Forward pass
output = {}
features = {}
output[task], features[task] = net.forward(inputs, task=task)
losses_tasks, losses_dscr, outputs_dscr, grads, task_labels \
= net.compute_losses(output, features, criteria_tr, gt_elems, alpha, p)
loss_tasks = losses_tasks[task]
running_loss_tr[task] += losses_tasks[task].item()
curr_loss_task[task] = losses_tasks[task].item()
counter_tr[task] += 1
# Store output for logging
outputs[task] = output[task].detach()
if p['dscr_type'] is not None:
# measure loss, accuracy and record accuracy for discriminator
loss_dscr = losses_dscr[task]
running_loss_tr_dscr += losses_dscr[task].item()
curr_loss_dscr += loss_dscr.item()
prec1 = common_configs.accuracy(outputs_dscr[task].data, task_labels[task], topk=(1,))
if prec1 != -1:
top1_dscr.update(prec1[0].item(), task_labels[task].size(0))
loss = (1 - p['dscr_w']) * loss_tasks + p['dscr_w'] * loss_dscr
else:
loss = loss_tasks
# Backward pass inside make_closure to update only weights that were used during fw pass
optimizer.zero_grad()
optimizer.step(closure=make_closure(loss=loss, net=net))
# Print stuff and log epoch loss into Tensorboard
if ii % num_img_tr == num_img_tr - 1:
print('[Epoch: %d, numImages: %5d]' % (epoch, ii + 1))
for task in p.TASKS.NAMES:
running_loss_tr[task] = running_loss_tr[task] / counter_tr[task]
writer.add_scalar('data/total_loss_epoch' + task,
running_loss_tr[task] / p.TASKS.LOSS_MULT[task], epoch)
print('Loss %s: %f' % (task, running_loss_tr[task]))
running_loss_tr[task] = 0
counter_tr[task] = 0
if p['dscr_type'] is not None:
running_loss_tr_dscr = running_loss_tr_dscr / num_img_tr / len(p.TASKS.NAMES)
writer.add_scalar('data/total_loss_epoch_dscr', running_loss_tr_dscr, epoch)
print('Loss Discriminator: %f' % running_loss_tr_dscr)
print('Train Accuracy Discriminator: Prec@1 {top1.avg:.3f} Error@1 {error1:.3f}'.format(
top1=top1_dscr, error1=100 - top1_dscr.avg))
writer.add_scalar('data/train_accuracy_dscr', top1_dscr.avg, epoch)
stop_time = timeit.default_timer()
print("Execution time: " + str(stop_time - start_time) + "\n")
# Log current train loss into Tensorboard
for task in p.TASKS.NAMES:
writer.add_scalar('data/train_loss_iter_' + task, curr_loss_task[task], ii + num_img_tr * epoch)
curr_loss_task[task] = 0.
if p['dscr_type'] is not None:
writer.add_scalar('data/train_loss_dscr_iter', curr_loss_dscr, ii + num_img_tr * epoch)
curr_loss_dscr = 0.
# Log train images to Tensorboard
if p['overfit'] and p.TRAIN.TENS_VIS and ii % p.TRAIN.TENS_VIS_INTER == 0:
curr_iter = ii + num_img_tr * epoch
tb_vizualizer.visualize_images_tb(writer, sample, outputs,
global_step=curr_iter, tag=ii, phase='train')
if ii % num_img_tr == num_img_tr - 1:
lr_ = lr_poly(p['lr'], iter_=epoch, max_iter=n_epochs)
print('(poly lr policy) learning rate: {0:.6f}'.format(lr_))
# Rebuild the param groups with the decayed lr; otherwise the per-group
# 'lr' values would keep overriding the optimizer-level lr_
train_params = config.get_train_params(net, p, lr_)
optimizer = optim.SGD(train_params, lr=lr_, momentum=p.TRAIN.MOMENTUM, weight_decay=p['wd'])
optimizer.zero_grad()
# Save the model
if (epoch % snapshot) == snapshot - 1 and epoch != 0:
torch.save(net.state_dict(), os.path.join(p['save_dir'], 'models',
'model_epoch-' + str(epoch) + '.pth'))
# One testing epoch
if p.TEST.USE_TEST and epoch % test_interval == (test_interval - 1):
print('Testing Phase')
top1_dscr = AverageMeter()
net.eval()
start_time = timeit.default_timer()
for ii, sample in enumerate(testloader):
inputs = sample['image'].to(device)
task_gts = list(sample.keys())
tasks = net.tasks
gt_elems = {x: sample[x].to(device, non_blocking=True) for x in tasks}
uniq = {x: gt_elems[x].unique() for x in gt_elems}
outputs = {}
for task in tasks:
if task not in task_gts:
continue
if len(uniq[task]) == 1 and uniq[task][0] == 255:
continue
# Forward pass of the mini-batch
output = {}
features = {}
output[task], features[task] = net.forward(inputs, task=task)
losses_tasks, losses_dscr, outputs_dscr, grads, task_labels \
= net.compute_losses(output, features, criteria_ts, gt_elems, alpha, p)
running_loss_ts[task] += losses_tasks[task].item()
counter_ts[task] += 1
# For logging
outputs[task] = output[task].detach()
if p['dscr_type'] is not None:
running_loss_ts_dscr += losses_dscr[task].item()
# measure accuracy and record loss for discriminator
prec1 = common_configs.accuracy(outputs_dscr[task].data, task_labels[task], topk=(1,))
if prec1 != -1:
top1_dscr.update(prec1[0].item(), task_labels[task].size(0))
# Print stuff
if ii % num_img_ts == num_img_ts - 1:
print('[Epoch: %d, numTestImages: %5d]' % (epoch, ii + 1))
for task in p.TASKS.NAMES:
running_loss_ts[task] = running_loss_ts[task] / counter_ts[task]
writer.add_scalar('data/test_loss_' + task + '_epoch',
running_loss_ts[task] / p.TASKS.LOSS_MULT[task], epoch)
print('Testing Loss %s: %f' % (task, running_loss_ts[task]))
running_loss_ts[task] = 0
counter_ts[task] = 0
# Free the graph
losses_tasks = {}
if p['dscr_type'] is not None:
running_loss_ts_dscr = running_loss_ts_dscr / num_img_ts / len(p.TASKS.NAMES)
writer.add_scalar('data/test_loss_dscr', running_loss_ts_dscr, epoch)
print('Loss Discriminator: %f' % running_loss_ts_dscr)
writer.add_scalar('data/test_accuracy_dscr', top1_dscr.avg, epoch)
print('Test Accuracy Discriminator: Prec@1 {top1.avg:.3f} Error@1 {error1:.3f}'.format(
top1=top1_dscr, error1=100 - top1_dscr.avg))
# Free the graph
losses_dscr = {}
stop_time = timeit.default_timer()
print("Execution time: " + str(stop_time - start_time) + "\n")
# Log test images to Tensorboard
if p.TRAIN.TENS_VIS and ii % p.TRAIN.TENS_VIS_INTER == 0:
curr_iter = ii + num_img_tr * epoch
tb_vizualizer.visualize_images_tb(writer, sample, outputs,
global_step=curr_iter, tag=ii, phase='test')
writer.close()
# Generate Results
net.eval()
_, _, transforms_infer = config.get_transformations(p)
for db_name in p['infer_db_names']:
testloader = config.get_test_loader(p, db_name=db_name, transforms=transforms_infer, infer=True)
save_dir_res = os.path.join(p['save_dir'], 'Results_' + db_name)
print('Testing Network')
# Main Testing Loop
with torch.no_grad():
for ii, sample in enumerate(testloader):
img, meta = sample['image'], sample['meta']
# Forward pass of the mini-batch
inputs = img.to(device)
tasks = net.tasks
for task in tasks:
output, _ = net.forward(inputs, task=task)
save_dir_task = os.path.join(save_dir_res, task)
if not os.path.exists(save_dir_task):
os.makedirs(save_dir_task)
output = interpolate(output, size=(inputs.size()[-2], inputs.size()[-1]),
mode='bilinear', align_corners=False)
output = common_configs.get_output(output, task)
for jj in range(int(inputs.size()[0])):
if len(sample[task][jj].unique()) == 1 and sample[task][jj].unique() == 255:
continue
# Parameters
fname = meta['image'][jj]
result = cv2.resize(output[jj], dsize=(meta['im_size'][1][jj], meta['im_size'][0][jj]),
interpolation=p.TASKS.INFER_FLAGVALS[task])
if task == 'depth':
sio.savemat(os.path.join(save_dir_task, fname + '.mat'), {'depth': result})
else:
imageio.imwrite(os.path.join(save_dir_task, fname + '.png'), result.astype(np.uint8))
if p.EVALUATE:
common_configs.eval_all_results(p)
if __name__ == '__main__':
main()
| astmt-master | experiments/dense_predict/nyud_resnet/main.py |
import os
PROJECT_ROOT_DIR = os.path.dirname(os.path.abspath(__file__))
| astmt-master | fblib/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
class AttentionModuleFree(nn.Module):
"""
Attention Module
"""
def __init__(self, input_size, offset=0.):
super(AttentionModuleFree, self).__init__()
# randomly initialize parameters
self.weight = nn.Parameter(torch.rand(1, input_size, 1, 1) + offset)
def forward(self, x):
return torch.mul(self.weight, x)
class AttentionModule(AttentionModuleFree):
"""
AttentionModuleFree with restricted real-valued parameters within range [0, 1]
"""
def __init__(self, input_size):
super(AttentionModule, self).__init__(input_size, offset=10)
# sigmoid squashes the (offset-initialized) weights into [0, 1]
self.sigm = nn.Sigmoid()
def forward(self, x):
return torch.mul(self.sigm(self.weight), x)
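def _attention_module_demo():
    """Minimal sketch (added): per-channel gating leaves the shape intact.

    With the +10 offset, sigmoid(weight) starts close to 1, so features pass
    through almost unchanged at initialization.
    """
    att = AttentionModule(input_size=64)
    x = torch.randn(2, 64, 8, 8)
    assert att(x).shape == x.shape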
class Conv2dAttentionAdapters(nn.Module):
"""
2D convolution followed by optional per-task transformation. The transformation can include the following:
- Residual adapters (in parallel)
- Attention modules (per-task feature multiplications) with gating, which can be binary or real-valued
During forward pass, except for the input tensor, the index of the task is required
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
stride=1,
padding=0,
dilation=1,
groups=1,
bias=False,
n_tasks=1,
adapters=False,
attention=False,
bn_per_task=False,
binary_attention=False):
super(Conv2dAttentionAdapters, self).__init__()
self.adapters = adapters
self.attention = attention
self.bn_per_task = bn_per_task and (self.adapters or self.attention)
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=kernel_size, stride=stride,
padding=padding, dilation=dilation,
groups=groups, bias=bias)
if self.attention:
print('Constructing attention modules.')
if binary_attention:
print('Binary attention!')
att_module = AttentionModuleFree
else:
att_module = AttentionModule
self.attend = nn.ModuleList([att_module(out_channels) for i in range(n_tasks)])
if self.adapters:
print('Constructing parallel residual adapters.')
self.adapt = nn.ModuleList([
nn.Conv2d(in_channels, out_channels, stride=stride, kernel_size=1, bias=False) for i in range(n_tasks)])
if self.bn_per_task:
print('Constructing per task batchnorm layers')
self.bn = nn.ModuleList([nn.BatchNorm2d(out_channels) for i in range(n_tasks)])
else:
self.bn = nn.BatchNorm2d(out_channels)
def forward(self, x, task=None):
if self.adapters:
adapt = self.adapt[task](x)
x = self.conv(x)
if self.attention:
# print('Attend, task {}'.format(task))
x = self.attend[task](x)
if self.adapters:
# print('adapt, task {}'.format(task))
x += adapt
if self.bn_per_task:
# print('Bnorm, task {}'.format(task))
x = self.bn[task](x)
else:
x = self.bn(x)
return x
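def _conv2d_attention_adapters_demo():
    """Minimal sketch (added): the task index selects the per-task modules."""
    conv = Conv2dAttentionAdapters(64, 128, kernel_size=3, padding=1, n_tasks=3,
                                   adapters=True, attention=True, bn_per_task=True)
    x = torch.randn(2, 64, 16, 16)
    assert conv(x, task=0).shape == (2, 128, 16, 16)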
class XPathLayer(nn.Module):
"""
Create per task ResNeXt path
"""
def __init__(self,
in_channels,
interm_channels,
out_channels,
stride,
n_tasks):
super(XPathLayer, self).__init__()
self.conv_reduce = nn.ModuleList([nn.Conv2d(in_channels=in_channels,
out_channels=interm_channels,
kernel_size=1,
stride=1,
padding=0,
bias=False) for i in range(n_tasks)])
self.bn_reduce = nn.ModuleList([nn.BatchNorm2d(interm_channels) for i in range(n_tasks)])
self.conv = nn.ModuleList([nn.Conv2d(in_channels=interm_channels,
out_channels=interm_channels,
kernel_size=3,
stride=stride,
padding=1) for i in range(n_tasks)])
self.bn = nn.ModuleList([nn.BatchNorm2d(interm_channels) for i in range(n_tasks)])
self.conv_expand = nn.ModuleList([nn.Conv2d(in_channels=interm_channels,
out_channels=out_channels,
kernel_size=1,
stride=1,
padding=0,
bias=False) for i in range(n_tasks)])
self.bn_expand = nn.ModuleList([nn.BatchNorm2d(out_channels) for i in range(n_tasks)])
def forward(self, x, task=None):
if task is None:
raise NotImplementedError('XPathLayer: Task not given at forward pass')
# Reduce
x = self.conv_reduce[task](x)
x = self.bn_reduce[task](x)
x = F.relu(x, inplace=True)
# Process
x = self.conv[task](x)
x = self.bn[task](x)
x = F.relu(x, inplace=True)
# Expand
x = self.conv_expand[task](x)
x = self.bn_expand[task](x)
return x
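# Usage sketch (added): one private reduce-process-expand path per task; the
# forward pass requires the task index, e.g.
#
#     xpath = XPathLayer(in_channels=256, interm_channels=64,
#                        out_channels=256, stride=1, n_tasks=2)
#     y = xpath(torch.randn(2, 256, 16, 16), task=1)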
| astmt-master | fblib/layers/attention.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.nn as nn
import torch.nn.functional as F
class Normalize(object):
"""Given mean: (R, G, B) and std: (R, G, B),
will normalize each channel of the torch.*Tensor, i.e.
channel = (channel - mean) / std
"""
def __init__(self, mean, std):
self.mean = mean
self.std = std
def __call__(self, tensor):
for i in range(len(self.mean)):
tensor[:, i, :, :].sub_(self.mean[i]).div_(self.std[i])
return tensor
class ImageFeatures(nn.Module):
"""
Forward pass of an image through a pre-trained ImageNet model.
Returns the output and features of the forward pass.
"""
def __init__(self, net, mean=None, std=None):
super(ImageFeatures, self).__init__()
if not mean:
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
self.normalize = Normalize(mean=mean, std=std)
self.net = net
def forward(self, x):
x = (x - x.min()) / (x.max() - x.min())
x = F.interpolate(x, size=(224, 224), mode='bilinear', align_corners=False)
x = self.normalize(x)
out, features = self.net(x)
return out, features
def main():
import os
import torch
import pickle
import cv2
import numpy as np
import urllib.request
from fblib import PROJECT_ROOT_DIR
from fblib.networks.classification.resnet import resnet101
classes = pickle.load(urllib.request.urlopen(
'https://gist.githubusercontent.com/yrevar/6135f1bd8dcf2e0cc683/raw/d133d61a09d7e5a3b36b8c111a8dd5c4b5d560ee'
'/imagenet1000_clsid_to_human.pkl'))
model = resnet101(pretrained=True, features=True)
model = ImageFeatures(model)
img = cv2.imread(os.path.join(PROJECT_ROOT_DIR, 'util/img/cat.jpg')).astype(np.float32)
img = img[:, :, :, np.newaxis]
img = img.transpose((3, 2, 0, 1))
img = torch.from_numpy(img.astype(np.float32))
model = model.eval()
with torch.no_grad():
output, features = model(img)
output = torch.nn.functional.softmax(output, dim=1)
print(output.max())
print(output.argmax())
print(classes[np.asscalar(output.argmax().numpy())])
if __name__ == '__main__':
main()
| astmt-master | fblib/layers/image_features.py |
| astmt-master | fblib/layers/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from torch.autograd import Function
class ReverseLayerF(Function):
@staticmethod
def forward(ctx, x, alpha):
ctx.alpha = alpha
return x.view_as(x)
@staticmethod
def backward(ctx, grad_output):
output = grad_output.neg() * ctx.alpha
return output, None
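def _gradient_reversal_demo():
    """Minimal sketch (added): identity forward pass, scaled-negated gradient.

    Calling .apply() is the standard way to invoke an autograd Function.
    """
    import torch
    x = torch.ones(3, requires_grad=True)
    y = ReverseLayerF.apply(x, 0.5)
    y.sum().backward()
    assert torch.allclose(x.grad, torch.full_like(x, -0.5))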
| astmt-master | fblib/layers/reverse_grad.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn.modules.module import Module
import numpy as np
class SoftMaxwithLoss(Module):
"""
Cross entropy loss for semantic segmentation (ignore index 255)
"""
def __init__(self):
super(SoftMaxwithLoss, self).__init__()
self.softmax = nn.LogSoftmax(dim=1)
self.criterion = nn.NLLLoss(ignore_index=255)
def forward(self, out, label):
assert not label.requires_grad
# out shape batch_size x channels x h x w
# label shape batch_size x 1 x h x w
label = label[:, 0, :, :].long()
loss = self.criterion(self.softmax(out), label)
return loss
class BalancedCrossEntropyLoss(Module):
"""
Balanced Cross Entropy Loss with optional ignore regions
"""
def __init__(self, size_average=True, batch_average=True, pos_weight=None):
super(BalancedCrossEntropyLoss, self).__init__()
self.size_average = size_average
self.batch_average = batch_average
self.pos_weight = pos_weight
def forward(self, output, label, void_pixels=None):
assert (output.size() == label.size())
labels = torch.ge(label, 0.5).float()
# Weighting of the loss, default is HED-style
if self.pos_weight is None:
num_labels_pos = torch.sum(labels)
num_labels_neg = torch.sum(1.0 - labels)
num_total = num_labels_pos + num_labels_neg
w = num_labels_neg / num_total
else:
w = self.pos_weight
output_gt_zero = torch.ge(output, 0).float()
loss_val = torch.mul(output, (labels - output_gt_zero)) - torch.log(
1 + torch.exp(output - 2 * torch.mul(output, output_gt_zero)))
loss_pos_pix = -torch.mul(labels, loss_val)
loss_neg_pix = -torch.mul(1.0 - labels, loss_val)
if void_pixels is not None and not self.pos_weight:
w_void = torch.le(void_pixels, 0.5).float()
loss_pos_pix = torch.mul(w_void, loss_pos_pix)
loss_neg_pix = torch.mul(w_void, loss_neg_pix)
num_total = num_total - torch.ge(void_pixels, 0.5).float().sum()
w = num_labels_neg / num_total
loss_pos = torch.sum(loss_pos_pix)
loss_neg = torch.sum(loss_neg_pix)
final_loss = w * loss_pos + (1 - w) * loss_neg
if self.size_average:
final_loss /= float(np.prod(label.size()))
elif self.batch_average:
final_loss /= label.size()[0]
return final_loss
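def _balanced_bce_demo():
    """Minimal sketch (added): balanced BCE on random logits and sparse labels.

    The positive/negative terms are reweighted by the label statistics, so
    sparse-foreground tasks such as edge detection are not dominated by the
    background class.
    """
    criterion = BalancedCrossEntropyLoss(size_average=True)
    output = torch.randn(2, 1, 32, 32)                # raw logits
    label = (torch.rand(2, 1, 32, 32) > 0.9).float()  # sparse foreground
    assert criterion(output, label).dim() == 0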
class BinaryCrossEntropyLoss(Module):
"""
Binary Cross Entropy with ignore regions, not balanced.
"""
def __init__(self, size_average=True, batch_average=True):
super(BinaryCrossEntropyLoss, self).__init__()
self.size_average = size_average
self.batch_average = batch_average
def forward(self, output, label, void_pixels=None):
assert (output.size() == label.size())
labels = torch.ge(label, 0.5).float()
output_gt_zero = torch.ge(output, 0).float()
loss_val = torch.mul(output, (labels - output_gt_zero)) - torch.log(
1 + torch.exp(output - 2 * torch.mul(output, output_gt_zero)))
loss_pos_pix = -torch.mul(labels, loss_val)
loss_neg_pix = -torch.mul(1.0 - labels, loss_val)
if void_pixels is not None:
w_void = torch.le(void_pixels, 0.5).float()
loss_pos_pix = torch.mul(w_void, loss_pos_pix)
loss_neg_pix = torch.mul(w_void, loss_neg_pix)
loss_pos = torch.sum(loss_pos_pix)
loss_neg = torch.sum(loss_neg_pix)
final_loss = loss_pos + loss_neg
if self.size_average:
final_loss /= float(np.prod(label.size()))
elif self.batch_average:
final_loss /= label.size()[0]
return final_loss
class ImGrad(nn.Module):
"""
Compute the spatial gradients of input with Sobel filter, in order to penalize gradient mismatch.
Used for depth prediction
"""
def __init__(self):
super(ImGrad, self).__init__()
self.convx = nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1, bias=False)
self.convy = nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1, bias=False)
fx = np.array([[1, 0, -1],
[2, 0, -2],
[1, 0, -1]])
fy = np.array([[1, 2, 1],
[0, 0, 0],
[-1, -2, -1]])
weight_x = torch.from_numpy(fx).float().unsqueeze(0).unsqueeze(0)
weight_y = torch.from_numpy(fy).float().unsqueeze(0).unsqueeze(0)
self.convx.weight.data = weight_x
self.convy.weight.data = weight_y
for p in self.parameters():
p.requires_grad = False
def forward(self, x):
grad_x = self.convx(x)
grad_y = self.convy(x)
return grad_x, grad_y
class GradLoss(nn.Module):
"""
Compute gradient loss using ImGrad
"""
def __init__(self, ignore_label=255):
super(GradLoss, self).__init__()
self.imgrad = ImGrad()
self.ignore_label = ignore_label
def forward(self, out, label):
if self.ignore_label:
n_valid = torch.sum(label != self.ignore_label).item()
label[label == self.ignore_label] = 0
out_grad_x, out_grad_y = self.imgrad(out)
label_grad_x, label_grad_y = self.imgrad(label)
out_grad = torch.cat((out_grad_y, out_grad_x), dim=1)
label_grad = torch.cat((label_grad_y, label_grad_x), dim=1)
# L1 norm
loss = torch.abs(out_grad - label_grad)
if self.ignore_label:
loss = torch.sum(loss) / n_valid
else:
loss = torch.mean(loss)
return loss
class RMSE_log(nn.Module):
def __init__(self, ignore_label=255):
super(RMSE_log, self).__init__()
self.ignore_label = ignore_label
def forward(self, out, label):
out[out <= 0] = 1e-6
log_mse = (torch.log(label) - torch.log(out)) ** 2
# Only inside valid pixels
if self.ignore_label:
n_valid = torch.sum(label != self.ignore_label).item()
log_mse[label == self.ignore_label] = 0
log_mse = torch.sum(log_mse) / n_valid
else:
log_mse = torch.mean(log_mse)
loss = torch.sqrt(log_mse)
return loss
class L1loss(nn.Module):
"""
L1 loss with ignore labels
"""
def __init__(self, ignore_label=255):
super(L1loss, self).__init__()
self.loss_func = F.l1_loss
self.ignore_label = ignore_label
def forward(self, out, label):
if self.ignore_label:
n_valid = torch.sum(label != self.ignore_label).item()
loss = torch.abs(out - label)
loss[label == self.ignore_label] = 0
loss = loss.sum()
if self.ignore_label:
loss.div_(max(n_valid, 1e-6))
else:
loss.div_(float(np.prod(label.size())))  # in-place; plain div() would discard the result
return loss
class DepthLoss(nn.Module):
"""
Loss for depth prediction. Combination of L1 loss and Gradient loss
"""
def __init__(self):
super(DepthLoss, self).__init__()
self.diff_loss = L1loss(ignore_label=255)
self.grad_loss = GradLoss(ignore_label=255)
def forward(self, out, label):
loss_diff = self.diff_loss(out, label)
loss_grad = self.grad_loss(out, label)
loss = loss_diff + loss_grad
return loss
def normal_ize(bottom, dim=1):
qn = torch.norm(bottom, p=2, dim=dim).unsqueeze(dim=dim) + 1e-12
return bottom.div(qn)
class Normalize(nn.Module):
def __init__(self):
super(Normalize, self).__init__()
def forward(self, bottom):
qn = torch.norm(bottom, p=2, dim=1).unsqueeze(dim=1) + 1e-12
top = bottom.div(qn)
return top
class NormalsLoss(Module):
"""
L1 loss with ignore labels
normalize: normalization for surface normals
"""
def __init__(self, size_average=True, normalize=False, norm=1):
super(NormalsLoss, self).__init__()
self.size_average = size_average
if normalize:
self.normalize = Normalize()
else:
self.normalize = None
if norm == 1:
print('Using L1 loss for surface normals')
self.loss_func = F.l1_loss
elif norm == 2:
print('Using L2 loss for surface normals')
self.loss_func = F.mse_loss
else:
raise NotImplementedError
def forward(self, out, label, ignore_label=255):
assert not label.requires_grad
if ignore_label:
n_valid = torch.sum(label != ignore_label).item()
out[label == ignore_label] = 0
label[label == ignore_label] = 0
if self.normalize is not None:
out = self.normalize(out)
loss = self.loss_func(out, label, reduction='sum')
if self.size_average:
if ignore_label:
loss.div_(max(n_valid, 1e-6))
else:
loss.div_(float(np.prod(label.size())))
return loss
def normals_test():
from fblib.dataloaders.pascal_context import PASCALContext
flagvals = {'image': cv2.INTER_CUBIC,
'edge': cv2.INTER_NEAREST,
'semseg': cv2.INTER_NEAREST,
'human_parts': cv2.INTER_NEAREST,
'normals': cv2.INTER_CUBIC}
transform = Compose([tr.RandomHorizontalFlip(),
tr.ScaleNRotate(rots=(-90, 90), scales=(1., 1.),
flagvals=flagvals),
tr.FixedResize(resolutions={x: (512, 512) for x in flagvals},
flagvals=flagvals),
tr.AddIgnoreRegions(),
tr.ToTensor()])
dataset_human = PASCALContext(split=['train', 'val'], transform=transform, retname=True,
do_edge=True, do_human_parts=True, do_semseg=True, do_normals=True)
dataloader = torch.utils.data.DataLoader(dataset_human, batch_size=2, shuffle=False, num_workers=0)
criterion = NormalsLoss(normalize=True)
for i, sample in enumerate(dataloader):
assert (sample['normals'].size()[2:] == sample['image'].size()[2:])
loss = criterion(sample['normals'], sample['normals'])
print('Sample number: {}. Loss: {} (should be very close to 0)'.format(i, loss.item()))
def depth_test():
from fblib.dataloaders.nyud import NYUD_MT
flagvals = {'image': cv2.INTER_CUBIC,
'edge': cv2.INTER_NEAREST,
'semseg': cv2.INTER_NEAREST,
'normals': cv2.INTER_LINEAR,
'depth': cv2.INTER_LINEAR}
transform = Compose([tr.RandomHorizontalFlip(),
tr.ScaleNRotate(rots=(-90, 90), scales=(1., 1.),
flagvals=flagvals),
tr.FixedResize(resolutions={x: (512, 512) for x in flagvals},
flagvals=flagvals),
tr.AddIgnoreRegions(),
tr.ToTensor()])
dataset_human = NYUD_MT(split=['train', 'val'], transform=transform, retname=True,
do_edge=True, do_semseg=True, do_normals=True, do_depth=True)
dataloader = torch.utils.data.DataLoader(dataset_human, batch_size=2, shuffle=False, num_workers=0)
criterion = DepthLoss()
for i, sample in enumerate(dataloader):
loss = criterion(sample['depth'], sample['depth'])
print('Sample number: {}. Loss: {} (should be 0)'.format(i, loss.item()))
if __name__ == '__main__':
import cv2
from torchvision.transforms import Compose
import fblib.dataloaders.custom_transforms as tr
normals_test()
| astmt-master | fblib/layers/loss.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy as np
import torch
from torch.nn import functional as F
def logit(x):
return np.log(x/(1-x+1e-08)+1e-08)
def sigmoid_np(x):
return 1/(1+np.exp(-x))
def center_crop(x, height, width):
crop_h = torch.FloatTensor([x.size()[2]]).sub(height).div(-2)
crop_w = torch.FloatTensor([x.size()[3]]).sub(width).div(-2)
# fixed indexing for PyTorch 0.4
return F.pad(x, [int(crop_w.ceil()[0]), int(crop_w.floor()[0]), int(crop_h.ceil()[0]), int(crop_h.floor()[0])])
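def _center_crop_demo():
    """Minimal sketch (added): negative pad amounts crop around the center."""
    x = torch.randn(1, 3, 10, 10)
    assert center_crop(x, height=6, width=8).shape == (1, 3, 6, 8)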
def upsample_filt(size):
factor = (size + 1) // 2
if size % 2 == 1:
center = factor - 1
else:
center = factor - 0.5
og = np.ogrid[:size, :size]
return (1 - abs(og[0] - center) / factor) * \
(1 - abs(og[1] - center) / factor)
def interp_surgery(lay):
"""
Set parameters s.t. deconvolutional layers compute bilinear interpolation
Only for deconvolution without groups
"""
m, k, h, w = lay.weight.data.size()
if m != k:
print('input + output channels need to be the same')
raise ValueError
if h != w:
print('filters need to be square')
raise ValueError
filt = upsample_filt(h)
for i in range(m):
lay.weight[i, i, :, :].data.copy_(torch.from_numpy(filt))
return lay.weight.data
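def _interp_surgery_demo():
    """Minimal sketch (added): set a ConvTranspose2d to bilinear upsampling.

    Off-diagonal filters are zeroed first; interp_surgery only fills the
    (i, i) entries with the bilinear kernel.
    """
    up = torch.nn.ConvTranspose2d(4, 4, kernel_size=4, stride=2, padding=1, bias=False)
    up.weight.data.zero_()
    interp_surgery(up)
    x = torch.randn(1, 4, 8, 8)
    assert up(x).shape == (1, 4, 16, 16)  # exact 2x upsampling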
| astmt-master | fblib/layers/misc_layers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from torch import nn
from fblib.util.custom_container import SequentialMultiTask
class SELayer(nn.Module):
"""
Squeeze and Excitation Layer
"""
def __init__(self, channel, reduction=16):
super(SELayer, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel),
nn.Sigmoid()
)
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y
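def _se_layer_demo():
    """Minimal sketch (added): channel-wise reweighting leaves the shape intact."""
    import torch
    se = SELayer(channel=64, reduction=16)
    x = torch.randn(2, 64, 8, 8)
    assert se(x).shape == x.shape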
class SELayerMultiTaskDict(nn.Module):
"""
Squeeze and Excitation Layer for multiple tasks (dict)
"""
def __init__(self, channel, reduction=16, tasks=None):
super(SELayerMultiTaskDict, self).__init__()
self.tasks = tasks
self.avg_pool = nn.AdaptiveAvgPool2d(1)
if self.tasks is None:
self.fc = nn.Sequential(nn.Linear(channel, channel // reduction),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel),
nn.Sigmoid())
else:
print('Initializing squeeze and excitation modules:')
self.fc = nn.ModuleDict()
for task in self.tasks:
print('SE for task: {}'.format(task))
self.fc[task] = SequentialMultiTask(nn.Linear(channel, channel // reduction),
nn.ReLU(inplace=True),
nn.Linear(channel // reduction, channel),
nn.Sigmoid())
def forward(self, x, task=None):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
if self.tasks:
y = self.fc[task](y).view(b, c, 1, 1)
else:
y = self.fc(y).view(b, c, 1, 1)
return x * y
class ConvCoupledSE(nn.Module):
"""
SE-layer per task, coupled with convolutions and batchnorm.
Possibility to place convolutions before/after bn, deploy bn per task, and use/not use SE attention.
"""
def __init__(self, tasks,
process_layers=None,
norm=None,
norm_kwargs=None,
norm_per_task=False,
squeeze=False,
adapters=False,
se_after_relu=True,
reduction=16):
super(ConvCoupledSE, self).__init__()
self.norm_per_task = norm_per_task
self.squeeze = squeeze
self.adapters = adapters
self.se_after_relu = se_after_relu
if not isinstance(process_layers, list):
process_layers = [process_layers]
self.process = nn.Sequential(*process_layers)
se_module = SELayerMultiTaskDict
if self.squeeze:
self.se = se_module(process_layers[-1].out_channels, tasks=tasks, reduction=reduction)
if self.adapters:
print('Using parallel adapters')
self.adapt = nn.ModuleDict({task: nn.Conv2d(process_layers[-1].in_channels, process_layers[-1].out_channels,
kernel_size=1, bias=False) for task in tasks})
if self.norm_per_task:
self.norm = nn.ModuleDict({task: norm(**norm_kwargs) for task in tasks})
else:
self.norm = norm(**norm_kwargs)
self.relu = nn.ReLU(inplace=True)
def forward(self, x, task):
if self.adapters:
x = self.process(x) + self.adapt[task](x)
else:
x = self.process(x)
if self.squeeze and not self.se_after_relu:
x = self.se(x, task)
if self.norm_per_task:
x = self.norm[task](x)
else:
x = self.norm(x)
x = self.relu(x)
if self.squeeze and self.se_after_relu:
x = self.se(x, task)
return x
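# Usage sketch (added; the arguments are illustrative): couple a conv with
# per-task batchnorm and per-task SE gating:
#
#     import torch.nn as nn
#     block = ConvCoupledSE(tasks=['edge', 'semseg'],
#                           process_layers=nn.Conv2d(64, 64, 3, padding=1, bias=False),
#                           norm=nn.BatchNorm2d, norm_kwargs={'num_features': 64},
#                           norm_per_task=True, squeeze=True)
#     y = block(torch.randn(2, 64, 16, 16), task='edge')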
| astmt-master | fblib/layers/squeeze.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from torch.autograd import Variable
from torchvision import models
from graphviz import Digraph
def make_dot(var, params):
""" Produces Graphviz representation of PyTorch autograd graph
Blue nodes are the Variables that require grad, orange are Tensors
saved for backward in torch.autograd.Function
Args:
var: output Variable
params: dict of (name, Variable) to add names to node that
require grad
"""
param_map = {id(v): k for k, v in params.items()}
node_attr = dict(style='filled',
shape='box',
align='left',
fontsize='12',
ranksep='0.1',
height='0.2')
dot = Digraph(node_attr=node_attr, graph_attr=dict(size="12,12"))
seen = set()
def size_to_str(size):
return '(' + (', ').join(['%d' % v for v in size]) + ')'
def add_nodes(var):
if var not in seen:
if torch.is_tensor(var):
dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange')
elif hasattr(var, 'variable'):
u = var.variable
node_name = '%s\n %s' % (param_map.get(id(u)), size_to_str(u.size()))
dot.node(str(id(var)), node_name, fillcolor='lightblue')
else:
dot.node(str(id(var)), str(type(var).__name__))
seen.add(var)
if hasattr(var, 'next_functions'):
for u in var.next_functions:
if u[0] is not None:
dot.edge(str(id(u[0])), str(id(var)))
add_nodes(u[0])
if hasattr(var, 'saved_tensors'):
for t in var.saved_tensors:
dot.edge(str(id(t)), str(id(var)))
add_nodes(t)
if type(var) == list:
for ii in range(0, len(var)):
add_nodes(var[ii].grad_fn)
elif type(var) == dict:
for x in var:
add_nodes(var[x].grad_fn)
else:
add_nodes(var.grad_fn)
return dot
if __name__ == "__main__":
inputs = torch.randn(1, 3, 224, 224)
resnet18 = models.resnet18()
y = resnet18(Variable(inputs))
g = make_dot(y, resnet18.state_dict())
g.view()
| astmt-master | fblib/util/pdf_visualizer.py |
| astmt-master | fblib/util/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import collections.abc
import re
try:
    from torch._six import string_classes, int_classes
except ImportError:  # torch._six was removed in newer PyTorch releases
    string_classes = (str, bytes)
    int_classes = int
_use_shared_memory = False
r"""Whether to use shared memory in default_collate"""
numpy_type_map = {
'float64': torch.DoubleTensor,
'float32': torch.FloatTensor,
'float16': torch.HalfTensor,
'int64': torch.LongTensor,
'int32': torch.IntTensor,
'int16': torch.ShortTensor,
'int8': torch.CharTensor,
'uint8': torch.ByteTensor,
}
def collate_mil(batch):
"""
Puts each data field into a tensor with outer dimension batch size.
    Custom-made for supporting MIL: 'edgeidx' entries are kept as per-sample lists instead of being collated.
"""
error_msg = "batch must contain tensors, numbers, dicts or lists; found {}"
elem_type = type(batch[0])
if isinstance(batch[0], torch.Tensor):
out = None
if _use_shared_memory:
# If we're in a background process, concatenate directly into a
# shared memory tensor to avoid an extra copy
numel = sum([x.numel() for x in batch])
storage = batch[0].storage()._new_shared(numel)
out = batch[0].new(storage)
return torch.stack(batch, 0, out=out)
elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \
and elem_type.__name__ != 'string_':
elem = batch[0]
if elem_type.__name__ == 'ndarray':
# array of string classes and object
if re.search('[SaUO]', elem.dtype.str) is not None:
raise TypeError(error_msg.format(elem.dtype))
return torch.stack([torch.from_numpy(b) for b in batch], 0)
if elem.shape == (): # scalars
py_type = float if elem.dtype.name.startswith('float') else int
return numpy_type_map[elem.dtype.name](list(map(py_type, batch)))
elif isinstance(batch[0], int_classes):
return torch.LongTensor(batch)
elif isinstance(batch[0], float):
return torch.DoubleTensor(batch)
elif isinstance(batch[0], string_classes):
return batch
    elif isinstance(batch[0], collections.abc.Mapping):
batch_modified = {key: collate_mil([d[key] for d in batch]) for key in batch[0] if key.find('idx') < 0}
if 'edgeidx' in batch[0]:
batch_modified['edgeidx'] = [batch[x]['edgeidx'] for x in range(len(batch))]
return batch_modified
    elif isinstance(batch[0], collections.abc.Sequence):
transposed = zip(*batch)
return [collate_mil(samples) for samples in transposed]
    raise TypeError(error_msg.format(type(batch[0])))
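# Hedged usage sketch (not part of the original file): collate_mil plugged into
# a DataLoader as collate_fn. The toy dataset is an illustrative assumption;
# the 'edgeidx' key exercises the special-cased MIL handling above.
if __name__ == '__main__':
    from torch.utils.data import DataLoader, Dataset

    class _ToyDataset(Dataset):
        def __len__(self):
            return 4

        def __getitem__(self, idx):
            return {'image': torch.rand(3, 8, 8), 'edgeidx': [idx, idx + 1]}

    loader = DataLoader(_ToyDataset(), batch_size=2, collate_fn=collate_mil)
    batch = next(iter(loader))
    print(batch['image'].shape)  # torch.Size([2, 3, 8, 8])
    print(batch['edgeidx'])      # per-sample index lists, kept unstacked
| astmt-master | fblib/util/custom_collate.py |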
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import cv2
import numpy as np
def worker_seed(x):
    """Set a per-worker numpy random seed (use as DataLoader worker_init_fn)."""
    np.random.seed(torch.initial_seed() % 2 ** 32)
def tens2image(tens):
"""Converts tensor with 2 or 3 dimensions to numpy array"""
im = tens.numpy()
if im.shape[0] == 1:
im = np.squeeze(im, axis=0)
if im.ndim == 3:
im = im.transpose((1, 2, 0))
return im
def pascal_color_map(N=256, normalized=False):
"""
Python implementation of the color map function for the PASCAL VOC data set.
Official Matlab version can be found in the PASCAL VOC devkit
http://host.robots.ox.ac.uk/pascal/VOC/voc2012/index.html#devkit
"""
def bitget(byteval, idx):
return (byteval & (1 << idx)) != 0
dtype = 'float32' if normalized else 'uint8'
cmap = np.zeros((N, 3), dtype=dtype)
for i in range(N):
r = g = b = 0
c = i
for j in range(8):
r = r | (bitget(c, 0) << 7 - j)
g = g | (bitget(c, 1) << 7 - j)
b = b | (bitget(c, 2) << 7 - j)
c = c >> 3
cmap[i] = np.array([r, g, b])
cmap = cmap / 255 if normalized else cmap
return cmap
def fixed_resize(sample, resolution, flagval=None):
"""
    Fixed resize of an image:
    resolution (tuple): resize the image to the size specified by the tuple, e.g. (512, 512).
    resolution (int): bring the smaller side to resolution, e.g. an image of shape 321 x 481 -> 512 x 767
"""
if flagval is None:
if ((sample == 0) | (sample == 1)).all():
flagval = cv2.INTER_NEAREST
else:
flagval = cv2.INTER_CUBIC
if isinstance(resolution, int):
tmp = [resolution, resolution]
tmp[int(np.argmax(sample.shape[:2]))] = int(
round(float(resolution) / np.min(sample.shape[:2]) * np.max(sample.shape[:2])))
resolution = tuple(tmp)
if sample.ndim == 2 or (sample.ndim == 3 and sample.shape[2] == 3):
sample = cv2.resize(sample, resolution[::-1], interpolation=flagval)
else:
tmp = sample
sample = np.zeros(np.append(resolution, tmp.shape[2]), dtype=np.float32)
for ii in range(sample.shape[2]):
sample[:, :, ii] = cv2.resize(tmp[:, :, ii], resolution[::-1], interpolation=flagval)
return sample
def im_normalize(im, max_value=1):
"""
Normalize image to range 0 - max_value
"""
imn = max_value * (im - im.min()) / max((im.max() - im.min()), 1e-8)
return imn
def generate_param_report(logfile, param):
    with open(logfile, 'w') as log_file:
        for key, val in param.items():
            log_file.write(key + ':' + str(val) + '\n')
def ind2sub(array_shape, inds):
    """Convert non-zero linear indices to subscripts.
    Note: cols receives the quotient and rows the remainder of the division
    by array_shape[1].
    """
    rows, cols = [], []
for k in range(len(inds)):
if inds[k] == 0:
continue
cols.append((inds[k].astype('int') // array_shape[1]))
rows.append((inds[k].astype('int') % array_shape[1]))
return rows, cols
def main():
import matplotlib.pyplot as plt
from torch.utils.data import DataLoader
from fblib.dataloaders.bsds import BSDS500
from fblib.dataloaders.custom_transforms import ToTensor
db = BSDS500(transform=ToTensor())
dataloader = DataLoader(db, batch_size=1)
for i, sample in enumerate(dataloader):
img = sample['image']
plt.imshow(im_normalize(fixed_resize(tens2image(img), resolution=512)))
plt.show()
if __name__ == "__main__":
main()
| astmt-master | fblib/util/helpers.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from collections import OrderedDict
from torch.nn.modules.container import Sequential
class SequentialMultiTask(Sequential):
"""A sequential container for multiple tasks.
Forward pass re-written to incorporate multiple tasks
"""
def __init__(self, *args):
super(SequentialMultiTask, self).__init__(*args)
def __getitem__(self, idx):
if isinstance(idx, slice):
return SequentialMultiTask(OrderedDict(list(self._modules.items())[idx]))
else:
return self._get_item_by_idx(self._modules.values(), idx)
def forward(self, input, task=None):
for module in self._modules.values():
if task is None:
input = module(input)
else:
input = module(input, task)
return input
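# Hedged usage sketch (not part of the original file): SequentialMultiTask
# forwards the extra `task` argument to every submodule. The toy task-aware
# module below is an illustrative assumption.
if __name__ == '__main__':
    import torch
    import torch.nn as nn

    class _TaskScale(nn.Module):
        """Toy module that scales its input by a per-task constant."""
        def __init__(self):
            super(_TaskScale, self).__init__()
            self.scales = {'taskA': 1.0, 'taskB': 2.0}

        def forward(self, x, task):
            return x * self.scales[task]

    seq = SequentialMultiTask(_TaskScale(), _TaskScale())
    print(seq(torch.ones(1), task='taskB'))  # tensor([4.]) -- scaled by 2.0 twice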
| astmt-master | fblib/util/custom_container.py |
| astmt-master | fblib/util/classification/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import time
import random
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def time_string():
ISOTIMEFORMAT = '%Y-%m-%d %X'
string = '[{}]'.format(time.strftime(ISOTIMEFORMAT, time.gmtime(time.time())))
return string
def convert_secs2time(epoch_time):
need_hour = int(epoch_time / 3600)
need_mins = int((epoch_time - 3600 * need_hour) / 60)
need_secs = int(epoch_time - 3600 * need_hour - 60 * need_mins)
return need_hour, need_mins, need_secs
def time_file_str():
ISOTIMEFORMAT = '%Y-%m-%d'
string = '{}'.format(time.strftime(ISOTIMEFORMAT, time.gmtime(time.time())))
return string + '-{}'.format(random.randint(1, 10000))
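# Hedged usage sketch (not part of the original file): AverageMeter keeping a
# batch-size-weighted running mean, plus the time helpers. Values are illustrative.
if __name__ == '__main__':
    meter = AverageMeter()
    for loss, batch_size in [(0.9, 32), (0.7, 32), (0.5, 16)]:
        meter.update(loss, n=batch_size)
    print('avg loss: {:.4f}'.format(meter.avg))  # 0.7400, weighted by batch size
    print(time_string())             # e.g. [2019-01-01 12:00:00]
    print(convert_secs2time(3725))   # (1, 2, 5), i.e. 1h 2m 5s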
| astmt-master | fblib/util/classification/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
from torchvision import utils as vutils
import fblib.util.pdf_visualizer as viz
from fblib.util.mypath import Path
def visualize_network(net, p):
net.eval()
x = torch.randn(1, 3, 512, 512)
x.requires_grad_()
# pdf visualizer
y = {}
for task in p.TASKS.NAMES:
y[task], _ = net.forward(x, task)
g = viz.make_dot(y, net.state_dict())
g.view(directory=Path.save_root_dir())
class TBVisualizer(object):
def __init__(self, tasks, min_ranges, max_ranges, batch_size):
# Visualization settings
self.grid_input = {
'image': {
'range': (0, 255),
'normalize': True,
'scale_each': True,
'nrow': batch_size
}}
self.grid_output = {}
for task in tasks:
min_range = min_ranges[task]
max_range = max_ranges[task]
self.grid_input[task] = {
'range': (min_range, max_range),
'normalize': True,
'scale_each': True,
'nrow': batch_size
}
self.grid_output[task+'_pred'] = {
'range': (min_range, max_range),
'normalize': True,
'scale_each': True,
'nrow': batch_size
}
def visualize_images_tb(self, writer, sample, outputs, global_step, tag, phase='train'):
"""Vizualize images into Tensorboard
writer: Tensorboardx summary writer
sample: dataloader sample that contains a dict of tensors, aka images and groundtruths
grid_input: see function get_visualizer()
grid_output: see function get_visualizer()
global_step: global iteration num
tag: current iteration num to tag on tensorboard
phase: 'train' or 'test
"""
for k in list(self.grid_input.keys()):
if k in sample.keys():
elem = sample[k].detach()
if k in {'normals', 'depth'}:
elem[elem == 255] = 0
img_grid = vutils.make_grid(elem, **self.grid_input[k])
writer.add_image(f'{k}_gt/{phase}_{tag}', img_grid, global_step)
for k in list(outputs.keys()):
if (k + '_pred') in self.grid_output.keys():
output = outputs[k].detach()
if k == 'normals':
elem = self._normalize(output)
elif k in {'depth', 'albedo'}:
elem = output
elif output.size()[1] == 1:
elem = 1 / (1 + torch.exp(-output))
else:
_, argmax_pred = torch.max(output, dim=1)
argmax_pred = argmax_pred.type(torch.FloatTensor)
elem = torch.unsqueeze(argmax_pred, 1)
img_grid = vutils.make_grid(elem, **self.grid_output[k + '_pred'])
writer.add_image(f'{k}_pred/{phase}_{tag}', img_grid, global_step)
@staticmethod
def _normalize(bottom):
qn = torch.norm(bottom, p=2, dim=1).unsqueeze(dim=1) + 1e-12
return bottom.div(qn)
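# Hedged usage sketch (not part of the original file): wiring TBVisualizer to a
# tensorboardX SummaryWriter. The 'depth' task, its value ranges, the log
# directory, and all tensors are illustrative assumptions.
if __name__ == '__main__':
    from tensorboardX import SummaryWriter
    tb_vis = TBVisualizer(tasks=['depth'], min_ranges={'depth': 0},
                          max_ranges={'depth': 80}, batch_size=2)
    writer = SummaryWriter(log_dir='/tmp/tb_example')
    sample = {'image': torch.rand(2, 3, 64, 64) * 255,
              'depth': torch.rand(2, 1, 64, 64) * 80}
    outputs = {'depth': torch.rand(2, 1, 64, 64) * 80}
    tb_vis.visualize_images_tb(writer, sample, outputs, global_step=0, tag=0, phase='train')
    writer.close()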
| astmt-master | fblib/util/mtl_tools/multitask_visualizer.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def imagenet_categ_names():
return { 0: 'tench, Tinca tinca',
1: 'goldfish, Carassius auratus',
2: 'great white shark, white shark, man-eater, man-eating shark, Carcharodon carcharias',
3: 'tiger shark, Galeocerdo cuvieri',
4: 'hammerhead, hammerhead shark',
5: 'electric ray, crampfish, numbfish, torpedo',
6: 'stingray',
7: 'cock',
8: 'hen',
9: 'ostrich, Struthio camelus',
10: 'brambling, Fringilla montifringilla',
11: 'goldfinch, Carduelis carduelis',
12: 'house finch, linnet, Carpodacus mexicanus',
13: 'junco, snowbird',
14: 'indigo bunting, indigo finch, indigo bird, Passerina cyanea',
15: 'robin, American robin, Turdus migratorius',
16: 'bulbul',
17: 'jay',
18: 'magpie',
19: 'chickadee',
20: 'water ouzel, dipper',
21: 'kite',
22: 'bald eagle, American eagle, Haliaeetus leucocephalus',
23: 'vulture',
24: 'great grey owl, great gray owl, Strix nebulosa',
25: 'European fire salamander, Salamandra salamandra',
26: 'common newt, Triturus vulgaris',
27: 'eft',
28: 'spotted salamander, Ambystoma maculatum',
29: 'axolotl, mud puppy, Ambystoma mexicanum',
30: 'bullfrog, Rana catesbeiana',
31: 'tree frog, tree-frog',
32: 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui',
33: 'loggerhead, loggerhead turtle, Caretta caretta',
34: 'leatherback turtle, leatherback, leathery turtle, Dermochelys coriacea',
35: 'mud turtle',
36: 'terrapin',
37: 'box turtle, box tortoise',
38: 'banded gecko',
39: 'common iguana, iguana, Iguana iguana',
40: 'American chameleon, anole, Anolis carolinensis',
41: 'whiptail, whiptail lizard',
42: 'agama',
43: 'frilled lizard, Chlamydosaurus kingi',
44: 'alligator lizard',
45: 'Gila monster, Heloderma suspectum',
46: 'green lizard, Lacerta viridis',
47: 'African chameleon, Chamaeleo chamaeleon',
48: 'Komodo dragon, Komodo lizard, dragon lizard, giant lizard, Varanus komodoensis',
49: 'African crocodile, Nile crocodile, Crocodylus niloticus',
50: 'American alligator, Alligator mississipiensis',
51: 'triceratops',
52: 'thunder snake, worm snake, Carphophis amoenus',
53: 'ringneck snake, ring-necked snake, ring snake',
54: 'hognose snake, puff adder, sand viper',
55: 'green snake, grass snake',
56: 'king snake, kingsnake',
57: 'garter snake, grass snake',
58: 'water snake',
59: 'vine snake',
60: 'night snake, Hypsiglena torquata',
61: 'boa constrictor, Constrictor constrictor',
62: 'rock python, rock snake, Python sebae',
63: 'Indian cobra, Naja naja',
64: 'green mamba',
65: 'sea snake',
66: 'horned viper, cerastes, sand viper, horned asp, Cerastes cornutus',
67: 'diamondback, diamondback rattlesnake, Crotalus adamanteus',
68: 'sidewinder, horned rattlesnake, Crotalus cerastes',
69: 'trilobite',
70: 'harvestman, daddy longlegs, Phalangium opilio',
71: 'scorpion',
72: 'black and gold garden spider, Argiope aurantia',
73: 'barn spider, Araneus cavaticus',
74: 'garden spider, Aranea diademata',
75: 'black widow, Latrodectus mactans',
76: 'tarantula',
77: 'wolf spider, hunting spider',
78: 'tick',
79: 'centipede',
80: 'black grouse',
81: 'ptarmigan',
82: 'ruffed grouse, partridge, Bonasa umbellus',
83: 'prairie chicken, prairie grouse, prairie fowl',
84: 'peacock',
85: 'quail',
86: 'partridge',
87: 'African grey, African gray, Psittacus erithacus',
88: 'macaw',
89: 'sulphur-crested cockatoo, Kakatoe galerita, Cacatua galerita',
90: 'lorikeet',
91: 'coucal',
92: 'bee eater',
93: 'hornbill',
94: 'hummingbird',
95: 'jacamar',
96: 'toucan',
97: 'drake',
98: 'red-breasted merganser, Mergus serrator',
99: 'goose',
100: 'black swan, Cygnus atratus',
101: 'tusker',
102: 'echidna, spiny anteater, anteater',
103: 'platypus, duckbill, duckbilled platypus, duck-billed platypus, Ornithorhynchus anatinus',
104: 'wallaby, brush kangaroo',
105: 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus',
106: 'wombat',
107: 'jellyfish',
108: 'sea anemone, anemone',
109: 'brain coral',
110: 'flatworm, platyhelminth',
111: 'nematode, nematode worm, roundworm',
112: 'conch',
113: 'snail',
114: 'slug',
115: 'sea slug, nudibranch',
116: 'chiton, coat-of-mail shell, sea cradle, polyplacophore',
117: 'chambered nautilus, pearly nautilus, nautilus',
118: 'Dungeness crab, Cancer magister',
119: 'rock crab, Cancer irroratus',
120: 'fiddler crab',
121: 'king crab, Alaska crab, Alaskan king crab, Alaska king crab, Paralithodes camtschatica',
122: 'American lobster, Northern lobster, Maine lobster, Homarus americanus',
123: 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish',
124: 'crayfish, crawfish, crawdad, crawdaddy',
125: 'hermit crab',
126: 'isopod',
127: 'white stork, Ciconia ciconia',
128: 'black stork, Ciconia nigra',
129: 'spoonbill',
130: 'flamingo',
131: 'little blue heron, Egretta caerulea',
132: 'American egret, great white heron, Egretta albus',
133: 'bittern',
134: 'crane',
135: 'limpkin, Aramus pictus',
136: 'European gallinule, Porphyrio porphyrio',
137: 'American coot, marsh hen, mud hen, water hen, Fulica americana',
138: 'bustard',
139: 'ruddy turnstone, Arenaria interpres',
140: 'red-backed sandpiper, dunlin, Erolia alpina',
141: 'redshank, Tringa totanus',
142: 'dowitcher',
143: 'oystercatcher, oyster catcher',
144: 'pelican',
145: 'king penguin, Aptenodytes patagonica',
146: 'albatross, mollymawk',
147: 'grey whale, gray whale, devilfish, Eschrichtius gibbosus, Eschrichtius robustus',
148: 'killer whale, killer, orca, grampus, sea wolf, Orcinus orca',
149: 'dugong, Dugong dugon',
150: 'sea lion',
151: 'Chihuahua',
152: 'Japanese spaniel',
153: 'Maltese dog, Maltese terrier, Maltese',
154: 'Pekinese, Pekingese, Peke',
155: 'Shih-Tzu',
156: 'Blenheim spaniel',
157: 'papillon',
158: 'toy terrier',
159: 'Rhodesian ridgeback',
160: 'Afghan hound, Afghan',
161: 'basset, basset hound',
162: 'beagle',
163: 'bloodhound, sleuthhound',
164: 'bluetick',
165: 'black-and-tan coonhound',
166: 'Walker hound, Walker foxhound',
167: 'English foxhound',
168: 'redbone',
169: 'borzoi, Russian wolfhound',
170: 'Irish wolfhound',
171: 'Italian greyhound',
172: 'whippet',
173: 'Ibizan hound, Ibizan Podenco',
174: 'Norwegian elkhound, elkhound',
175: 'otterhound, otter hound',
176: 'Saluki, gazelle hound',
177: 'Scottish deerhound, deerhound',
178: 'Weimaraner',
179: 'Staffordshire bullterrier, Staffordshire bull terrier',
180: 'American Staffordshire terrier, Staffordshire terrier, American pit bull terrier, pit bull terrier',
181: 'Bedlington terrier',
182: 'Border terrier',
183: 'Kerry blue terrier',
184: 'Irish terrier',
185: 'Norfolk terrier',
186: 'Norwich terrier',
187: 'Yorkshire terrier',
188: 'wire-haired fox terrier',
189: 'Lakeland terrier',
190: 'Sealyham terrier, Sealyham',
191: 'Airedale, Airedale terrier',
192: 'cairn, cairn terrier',
193: 'Australian terrier',
194: 'Dandie Dinmont, Dandie Dinmont terrier',
195: 'Boston bull, Boston terrier',
196: 'miniature schnauzer',
197: 'giant schnauzer',
198: 'standard schnauzer',
199: 'Scotch terrier, Scottish terrier, Scottie',
200: 'Tibetan terrier, chrysanthemum dog',
201: 'silky terrier, Sydney silky',
202: 'soft-coated wheaten terrier',
203: 'West Highland white terrier',
204: 'Lhasa, Lhasa apso',
205: 'flat-coated retriever',
206: 'curly-coated retriever',
207: 'golden retriever',
208: 'Labrador retriever',
209: 'Chesapeake Bay retriever',
210: 'German short-haired pointer',
211: 'vizsla, Hungarian pointer',
212: 'English setter',
213: 'Irish setter, red setter',
214: 'Gordon setter',
215: 'Brittany spaniel',
216: 'clumber, clumber spaniel',
217: 'English springer, English springer spaniel',
218: 'Welsh springer spaniel',
219: 'cocker spaniel, English cocker spaniel, cocker',
220: 'Sussex spaniel',
221: 'Irish water spaniel',
222: 'kuvasz',
223: 'schipperke',
224: 'groenendael',
225: 'malinois',
226: 'briard',
227: 'kelpie',
228: 'komondor',
229: 'Old English sheepdog, bobtail',
230: 'Shetland sheepdog, Shetland sheep dog, Shetland',
231: 'collie',
232: 'Border collie',
233: 'Bouvier des Flandres, Bouviers des Flandres',
234: 'Rottweiler',
235: 'German shepherd, German shepherd dog, German police dog, alsatian',
236: 'Doberman, Doberman pinscher',
237: 'miniature pinscher',
238: 'Greater Swiss Mountain dog',
239: 'Bernese mountain dog',
240: 'Appenzeller',
241: 'EntleBucher',
242: 'boxer',
243: 'bull mastiff',
244: 'Tibetan mastiff',
245: 'French bulldog',
246: 'Great Dane',
247: 'Saint Bernard, St Bernard',
248: 'Eskimo dog, husky',
249: 'malamute, malemute, Alaskan malamute',
250: 'Siberian husky',
251: 'dalmatian, coach dog, carriage dog',
252: 'affenpinscher, monkey pinscher, monkey dog',
253: 'basenji',
254: 'pug, pug-dog',
255: 'Leonberg',
256: 'Newfoundland, Newfoundland dog',
257: 'Great Pyrenees',
258: 'Samoyed, Samoyede',
259: 'Pomeranian',
260: 'chow, chow chow',
261: 'keeshond',
262: 'Brabancon griffon',
263: 'Pembroke, Pembroke Welsh corgi',
264: 'Cardigan, Cardigan Welsh corgi',
265: 'toy poodle',
266: 'miniature poodle',
267: 'standard poodle',
268: 'Mexican hairless',
269: 'timber wolf, grey wolf, gray wolf, Canis lupus',
270: 'white wolf, Arctic wolf, Canis lupus tundrarum',
271: 'red wolf, maned wolf, Canis rufus, Canis niger',
272: 'coyote, prairie wolf, brush wolf, Canis latrans',
273: 'dingo, warrigal, warragal, Canis dingo',
274: 'dhole, Cuon alpinus',
275: 'African hunting dog, hyena dog, Cape hunting dog, Lycaon pictus',
276: 'hyena, hyaena',
277: 'red fox, Vulpes vulpes',
278: 'kit fox, Vulpes macrotis',
279: 'Arctic fox, white fox, Alopex lagopus',
280: 'grey fox, gray fox, Urocyon cinereoargenteus',
281: 'tabby, tabby cat',
282: 'tiger cat',
283: 'Persian cat',
284: 'Siamese cat, Siamese',
285: 'Egyptian cat',
286: 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor',
287: 'lynx, catamount',
288: 'leopard, Panthera pardus',
289: 'snow leopard, ounce, Panthera uncia',
290: 'jaguar, panther, Panthera onca, Felis onca',
291: 'lion, king of beasts, Panthera leo',
292: 'tiger, Panthera tigris',
293: 'cheetah, chetah, Acinonyx jubatus',
294: 'brown bear, bruin, Ursus arctos',
295: 'American black bear, black bear, Ursus americanus, Euarctos americanus',
296: 'ice bear, polar bear, Ursus Maritimus, Thalarctos maritimus',
297: 'sloth bear, Melursus ursinus, Ursus ursinus',
298: 'mongoose',
299: 'meerkat, mierkat',
300: 'tiger beetle',
301: 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle',
302: 'ground beetle, carabid beetle',
303: 'long-horned beetle, longicorn, longicorn beetle',
304: 'leaf beetle, chrysomelid',
305: 'dung beetle',
306: 'rhinoceros beetle',
307: 'weevil',
308: 'fly',
309: 'bee',
310: 'ant, emmet, pismire',
311: 'grasshopper, hopper',
312: 'cricket',
313: 'walking stick, walkingstick, stick insect',
314: 'cockroach, roach',
315: 'mantis, mantid',
316: 'cicada, cicala',
317: 'leafhopper',
318: 'lacewing, lacewing fly',
319: "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk",
320: 'damselfly',
321: 'admiral',
322: 'ringlet, ringlet butterfly',
323: 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus',
324: 'cabbage butterfly',
325: 'sulphur butterfly, sulfur butterfly',
326: 'lycaenid, lycaenid butterfly',
327: 'starfish, sea star',
328: 'sea urchin',
329: 'sea cucumber, holothurian',
330: 'wood rabbit, cottontail, cottontail rabbit',
331: 'hare',
332: 'Angora, Angora rabbit',
333: 'hamster',
334: 'porcupine, hedgehog',
335: 'fox squirrel, eastern fox squirrel, Sciurus niger',
336: 'marmot',
337: 'beaver',
338: 'guinea pig, Cavia cobaya',
339: 'sorrel',
340: 'zebra',
341: 'hog, pig, grunter, squealer, Sus scrofa',
342: 'wild boar, boar, Sus scrofa',
343: 'warthog',
344: 'hippopotamus, hippo, river horse, Hippopotamus amphibius',
345: 'ox',
346: 'water buffalo, water ox, Asiatic buffalo, Bubalus bubalis',
347: 'bison',
348: 'ram, tup',
349: 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis',
350: 'ibex, Capra ibex',
351: 'hartebeest',
352: 'impala, Aepyceros melampus',
353: 'gazelle',
354: 'Arabian camel, dromedary, Camelus dromedarius',
355: 'llama',
356: 'weasel',
357: 'mink',
358: 'polecat, fitch, foulmart, foumart, Mustela putorius',
359: 'black-footed ferret, ferret, Mustela nigripes',
360: 'otter',
361: 'skunk, polecat, wood pussy',
362: 'badger',
363: 'armadillo',
364: 'three-toed sloth, ai, Bradypus tridactylus',
365: 'orangutan, orang, orangutang, Pongo pygmaeus',
366: 'gorilla, Gorilla gorilla',
367: 'chimpanzee, chimp, Pan troglodytes',
368: 'gibbon, Hylobates lar',
369: 'siamang, Hylobates syndactylus, Symphalangus syndactylus',
370: 'guenon, guenon monkey',
371: 'patas, hussar monkey, Erythrocebus patas',
372: 'baboon',
373: 'macaque',
374: 'langur',
375: 'colobus, colobus monkey',
376: 'proboscis monkey, Nasalis larvatus',
377: 'marmoset',
378: 'capuchin, ringtail, Cebus capucinus',
379: 'howler monkey, howler',
380: 'titi, titi monkey',
381: 'spider monkey, Ateles geoffroyi',
382: 'squirrel monkey, Saimiri sciureus',
383: 'Madagascar cat, ring-tailed lemur, Lemur catta',
384: 'indri, indris, Indri indri, Indri brevicaudatus',
385: 'Indian elephant, Elephas maximus',
386: 'African elephant, Loxodonta africana',
387: 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens',
388: 'giant panda, panda, panda bear, coon bear, Ailuropoda melanoleuca',
389: 'barracouta, snoek',
390: 'eel',
391: 'coho, cohoe, coho salmon, blue jack, silver salmon, Oncorhynchus kisutch',
392: 'rock beauty, Holocanthus tricolor',
393: 'anemone fish',
394: 'sturgeon',
395: 'gar, garfish, garpike, billfish, Lepisosteus osseus',
396: 'lionfish',
397: 'puffer, pufferfish, blowfish, globefish',
398: 'abacus',
399: 'abaya',
400: "academic gown, academic robe, judge's robe",
401: 'accordion, piano accordion, squeeze box',
402: 'acoustic guitar',
403: 'aircraft carrier, carrier, flattop, attack aircraft carrier',
404: 'airliner',
405: 'airship, dirigible',
406: 'altar',
407: 'ambulance',
408: 'amphibian, amphibious vehicle',
409: 'analog clock',
410: 'apiary, bee house',
411: 'apron',
412: 'ashcan, trash can, garbage can, wastebin, ash bin, ash-bin, ashbin, dustbin, trash barrel, trash bin',
413: 'assault rifle, assault gun',
414: 'backpack, back pack, knapsack, packsack, rucksack, haversack',
415: 'bakery, bakeshop, bakehouse',
416: 'balance beam, beam',
417: 'balloon',
418: 'ballpoint, ballpoint pen, ballpen, Biro',
419: 'Band Aid',
420: 'banjo',
421: 'bannister, banister, balustrade, balusters, handrail',
422: 'barbell',
423: 'barber chair',
424: 'barbershop',
425: 'barn',
426: 'barometer',
427: 'barrel, cask',
428: 'barrow, garden cart, lawn cart, wheelbarrow',
429: 'baseball',
430: 'basketball',
431: 'bassinet',
432: 'bassoon',
433: 'bathing cap, swimming cap',
434: 'bath towel',
435: 'bathtub, bathing tub, bath, tub',
436: 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon',
437: 'beacon, lighthouse, beacon light, pharos',
438: 'beaker',
439: 'bearskin, busby, shako',
440: 'beer bottle',
441: 'beer glass',
442: 'bell cote, bell cot',
443: 'bib',
444: 'bicycle-built-for-two, tandem bicycle, tandem',
445: 'bikini, two-piece',
446: 'binder, ring-binder',
447: 'binoculars, field glasses, opera glasses',
448: 'birdhouse',
449: 'boathouse',
450: 'bobsled, bobsleigh, bob',
451: 'bolo tie, bolo, bola tie, bola',
452: 'bonnet, poke bonnet',
453: 'bookcase',
454: 'bookshop, bookstore, bookstall',
455: 'bottlecap',
456: 'bow',
457: 'bow tie, bow-tie, bowtie',
458: 'brass, memorial tablet, plaque',
459: 'brassiere, bra, bandeau',
460: 'breakwater, groin, groyne, mole, bulwark, seawall, jetty',
461: 'breastplate, aegis, egis',
462: 'broom',
463: 'bucket, pail',
464: 'buckle',
465: 'bulletproof vest',
466: 'bullet train, bullet',
467: 'butcher shop, meat market',
468: 'cab, hack, taxi, taxicab',
469: 'caldron, cauldron',
470: 'candle, taper, wax light',
471: 'cannon',
472: 'canoe',
473: 'can opener, tin opener',
474: 'cardigan',
475: 'car mirror',
476: 'carousel, carrousel, merry-go-round, roundabout, whirligig',
477: "carpenter's kit, tool kit",
478: 'carton',
479: 'car wheel',
480: 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM',
481: 'cassette',
482: 'cassette player',
483: 'castle',
484: 'catamaran',
485: 'CD player',
486: 'cello, violoncello',
487: 'cellular telephone, cellular phone, cellphone, cell, mobile phone',
488: 'chain',
489: 'chainlink fence',
490: 'chain mail, ring mail, mail, chain armor, chain armour, ring armor, ring armour',
491: 'chain saw, chainsaw',
492: 'chest',
493: 'chiffonier, commode',
494: 'chime, bell, gong',
495: 'china cabinet, china closet',
496: 'Christmas stocking',
497: 'church, church building',
498: 'cinema, movie theater, movie theatre, movie house, picture palace',
499: 'cleaver, meat cleaver, chopper',
500: 'cliff dwelling',
501: 'cloak',
502: 'clog, geta, patten, sabot',
503: 'cocktail shaker',
504: 'coffee mug',
505: 'coffeepot',
506: 'coil, spiral, volute, whorl, helix',
507: 'combination lock',
508: 'computer keyboard, keypad',
509: 'confectionery, confectionary, candy store',
510: 'container ship, containership, container vessel',
511: 'convertible',
512: 'corkscrew, bottle screw',
513: 'cornet, horn, trumpet, trump',
514: 'cowboy boot',
515: 'cowboy hat, ten-gallon hat',
516: 'cradle',
517: 'crane',
518: 'crash helmet',
519: 'crate',
520: 'crib, cot',
521: 'Crock Pot',
522: 'croquet ball',
523: 'crutch',
524: 'cuirass',
525: 'dam, dike, dyke',
526: 'desk',
527: 'desktop computer',
528: 'dial telephone, dial phone',
529: 'diaper, nappy, napkin',
530: 'digital clock',
531: 'digital watch',
532: 'dining table, board',
533: 'dishrag, dishcloth',
534: 'dishwasher, dish washer, dishwashing machine',
535: 'disk brake, disc brake',
536: 'dock, dockage, docking facility',
537: 'dogsled, dog sled, dog sleigh',
538: 'dome',
539: 'doormat, welcome mat',
540: 'drilling platform, offshore rig',
541: 'drum, membranophone, tympan',
542: 'drumstick',
543: 'dumbbell',
544: 'Dutch oven',
545: 'electric fan, blower',
546: 'electric guitar',
547: 'electric locomotive',
548: 'entertainment center',
549: 'envelope',
550: 'espresso maker',
551: 'face powder',
552: 'feather boa, boa',
553: 'file, file cabinet, filing cabinet',
554: 'fireboat',
555: 'fire engine, fire truck',
556: 'fire screen, fireguard',
557: 'flagpole, flagstaff',
558: 'flute, transverse flute',
559: 'folding chair',
560: 'football helmet',
561: 'forklift',
562: 'fountain',
563: 'fountain pen',
564: 'four-poster',
565: 'freight car',
566: 'French horn, horn',
567: 'frying pan, frypan, skillet',
568: 'fur coat',
569: 'garbage truck, dustcart',
570: 'gasmask, respirator, gas helmet',
571: 'gas pump, gasoline pump, petrol pump, island dispenser',
572: 'goblet',
573: 'go-kart',
574: 'golf ball',
575: 'golfcart, golf cart',
576: 'gondola',
577: 'gong, tam-tam',
578: 'gown',
579: 'grand piano, grand',
580: 'greenhouse, nursery, glasshouse',
581: 'grille, radiator grille',
582: 'grocery store, grocery, food market, market',
583: 'guillotine',
584: 'hair slide',
585: 'hair spray',
586: 'half track',
587: 'hammer',
588: 'hamper',
589: 'hand blower, blow dryer, blow drier, hair dryer, hair drier',
590: 'hand-held computer, hand-held microcomputer',
591: 'handkerchief, hankie, hanky, hankey',
592: 'hard disc, hard disk, fixed disk',
593: 'harmonica, mouth organ, harp, mouth harp',
594: 'harp',
595: 'harvester, reaper',
596: 'hatchet',
597: 'holster',
598: 'home theater, home theatre',
599: 'honeycomb',
600: 'hook, claw',
601: 'hoopskirt, crinoline',
602: 'horizontal bar, high bar',
603: 'horse cart, horse-cart',
604: 'hourglass',
605: 'iPod',
606: 'iron, smoothing iron',
607: "jack-o'-lantern",
608: 'jean, blue jean, denim',
609: 'jeep, landrover',
610: 'jersey, T-shirt, tee shirt',
611: 'jigsaw puzzle',
612: 'jinrikisha, ricksha, rickshaw',
613: 'joystick',
614: 'kimono',
615: 'knee pad',
616: 'knot',
617: 'lab coat, laboratory coat',
618: 'ladle',
619: 'lampshade, lamp shade',
620: 'laptop, laptop computer',
621: 'lawn mower, mower',
622: 'lens cap, lens cover',
623: 'letter opener, paper knife, paperknife',
624: 'library',
625: 'lifeboat',
626: 'lighter, light, igniter, ignitor',
627: 'limousine, limo',
628: 'liner, ocean liner',
629: 'lipstick, lip rouge',
630: 'Loafer',
631: 'lotion',
632: 'loudspeaker, speaker, speaker unit, loudspeaker system, speaker system',
633: "loupe, jeweler's loupe",
634: 'lumbermill, sawmill',
635: 'magnetic compass',
636: 'mailbag, postbag',
637: 'mailbox, letter box',
638: 'maillot',
639: 'maillot, tank suit',
640: 'manhole cover',
641: 'maraca',
642: 'marimba, xylophone',
643: 'mask',
644: 'matchstick',
645: 'maypole',
646: 'maze, labyrinth',
647: 'measuring cup',
648: 'medicine chest, medicine cabinet',
649: 'megalith, megalithic structure',
650: 'microphone, mike',
651: 'microwave, microwave oven',
652: 'military uniform',
653: 'milk can',
654: 'minibus',
655: 'miniskirt, mini',
656: 'minivan',
657: 'missile',
658: 'mitten',
659: 'mixing bowl',
660: 'mobile home, manufactured home',
661: 'Model T',
662: 'modem',
663: 'monastery',
664: 'monitor',
665: 'moped',
666: 'mortar',
667: 'mortarboard',
668: 'mosque',
669: 'mosquito net',
670: 'motor scooter, scooter',
671: 'mountain bike, all-terrain bike, off-roader',
672: 'mountain tent',
673: 'mouse, computer mouse',
674: 'mousetrap',
675: 'moving van',
676: 'muzzle',
677: 'nail',
678: 'neck brace',
679: 'necklace',
680: 'nipple',
681: 'notebook, notebook computer',
682: 'obelisk',
683: 'oboe, hautboy, hautbois',
684: 'ocarina, sweet potato',
685: 'odometer, hodometer, mileometer, milometer',
686: 'oil filter',
687: 'organ, pipe organ',
688: 'oscilloscope, scope, cathode-ray oscilloscope, CRO',
689: 'overskirt',
690: 'oxcart',
691: 'oxygen mask',
692: 'packet',
693: 'paddle, boat paddle',
694: 'paddlewheel, paddle wheel',
695: 'padlock',
696: 'paintbrush',
697: "pajama, pyjama, pj's, jammies",
698: 'palace',
699: 'panpipe, pandean pipe, syrinx',
700: 'paper towel',
701: 'parachute, chute',
702: 'parallel bars, bars',
703: 'park bench',
704: 'parking meter',
705: 'passenger car, coach, carriage',
706: 'patio, terrace',
707: 'pay-phone, pay-station',
708: 'pedestal, plinth, footstall',
709: 'pencil box, pencil case',
710: 'pencil sharpener',
711: 'perfume, essence',
712: 'Petri dish',
713: 'photocopier',
714: 'pick, plectrum, plectron',
715: 'pickelhaube',
716: 'picket fence, paling',
717: 'pickup, pickup truck',
718: 'pier',
719: 'piggy bank, penny bank',
720: 'pill bottle',
721: 'pillow',
722: 'ping-pong ball',
723: 'pinwheel',
724: 'pirate, pirate ship',
725: 'pitcher, ewer',
726: "plane, carpenter's plane, woodworking plane",
727: 'planetarium',
728: 'plastic bag',
729: 'plate rack',
730: 'plow, plough',
731: "plunger, plumber's helper",
732: 'Polaroid camera, Polaroid Land camera',
733: 'pole',
734: 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria',
735: 'poncho',
736: 'pool table, billiard table, snooker table',
737: 'pop bottle, soda bottle',
738: 'pot, flowerpot',
739: "potter's wheel",
740: 'power drill',
741: 'prayer rug, prayer mat',
742: 'printer',
743: 'prison, prison house',
744: 'projectile, missile',
745: 'projector',
746: 'puck, hockey puck',
747: 'punching bag, punch bag, punching ball, punchball',
748: 'purse',
749: 'quill, quill pen',
750: 'quilt, comforter, comfort, puff',
751: 'racer, race car, racing car',
752: 'racket, racquet',
753: 'radiator',
754: 'radio, wireless',
755: 'radio telescope, radio reflector',
756: 'rain barrel',
757: 'recreational vehicle, RV, R.V.',
758: 'reel',
759: 'reflex camera',
760: 'refrigerator, icebox',
761: 'remote control, remote',
762: 'restaurant, eating house, eating place, eatery',
763: 'revolver, six-gun, six-shooter',
764: 'rifle',
765: 'rocking chair, rocker',
766: 'rotisserie',
767: 'rubber eraser, rubber, pencil eraser',
768: 'rugby ball',
769: 'rule, ruler',
770: 'running shoe',
771: 'safe',
772: 'safety pin',
773: 'saltshaker, salt shaker',
774: 'sandal',
775: 'sarong',
776: 'sax, saxophone',
777: 'scabbard',
778: 'scale, weighing machine',
779: 'school bus',
780: 'schooner',
781: 'scoreboard',
782: 'screen, CRT screen',
783: 'screw',
784: 'screwdriver',
785: 'seat belt, seatbelt',
786: 'sewing machine',
787: 'shield, buckler',
788: 'shoe shop, shoe-shop, shoe store',
789: 'shoji',
790: 'shopping basket',
791: 'shopping cart',
792: 'shovel',
793: 'shower cap',
794: 'shower curtain',
795: 'ski',
796: 'ski mask',
797: 'sleeping bag',
798: 'slide rule, slipstick',
799: 'sliding door',
800: 'slot, one-armed bandit',
801: 'snorkel',
802: 'snowmobile',
803: 'snowplow, snowplough',
804: 'soap dispenser',
805: 'soccer ball',
806: 'sock',
807: 'solar dish, solar collector, solar furnace',
808: 'sombrero',
809: 'soup bowl',
810: 'space bar',
811: 'space heater',
812: 'space shuttle',
813: 'spatula',
814: 'speedboat',
815: "spider web, spider's web",
816: 'spindle',
817: 'sports car, sport car',
818: 'spotlight, spot',
819: 'stage',
820: 'steam locomotive',
821: 'steel arch bridge',
822: 'steel drum',
823: 'stethoscope',
824: 'stole',
825: 'stone wall',
826: 'stopwatch, stop watch',
827: 'stove',
828: 'strainer',
829: 'streetcar, tram, tramcar, trolley, trolley car',
830: 'stretcher',
831: 'studio couch, day bed',
832: 'stupa, tope',
833: 'submarine, pigboat, sub, U-boat',
834: 'suit, suit of clothes',
835: 'sundial',
836: 'sunglass',
837: 'sunglasses, dark glasses, shades',
838: 'sunscreen, sunblock, sun blocker',
839: 'suspension bridge',
840: 'swab, swob, mop',
841: 'sweatshirt',
842: 'swimming trunks, bathing trunks',
843: 'swing',
844: 'switch, electric switch, electrical switch',
845: 'syringe',
846: 'table lamp',
847: 'tank, army tank, armored combat vehicle, armoured combat vehicle',
848: 'tape player',
849: 'teapot',
850: 'teddy, teddy bear',
851: 'television, television system',
852: 'tennis ball',
853: 'thatch, thatched roof',
854: 'theater curtain, theatre curtain',
855: 'thimble',
856: 'thresher, thrasher, threshing machine',
857: 'throne',
858: 'tile roof',
859: 'toaster',
860: 'tobacco shop, tobacconist shop, tobacconist',
861: 'toilet seat',
862: 'torch',
863: 'totem pole',
864: 'tow truck, tow car, wrecker',
865: 'toyshop',
866: 'tractor',
867: 'trailer truck, tractor trailer, trucking rig, rig, articulated lorry, semi',
868: 'tray',
869: 'trench coat',
870: 'tricycle, trike, velocipede',
871: 'trimaran',
872: 'tripod',
873: 'triumphal arch',
874: 'trolleybus, trolley coach, trackless trolley',
875: 'trombone',
876: 'tub, vat',
877: 'turnstile',
878: 'typewriter keyboard',
879: 'umbrella',
880: 'unicycle, monocycle',
881: 'upright, upright piano',
882: 'vacuum, vacuum cleaner',
883: 'vase',
884: 'vault',
885: 'velvet',
886: 'vending machine',
887: 'vestment',
888: 'viaduct',
889: 'violin, fiddle',
890: 'volleyball',
891: 'waffle iron',
892: 'wall clock',
893: 'wallet, billfold, notecase, pocketbook',
894: 'wardrobe, closet, press',
895: 'warplane, military plane',
896: 'washbasin, handbasin, washbowl, lavabo, wash-hand basin',
897: 'washer, automatic washer, washing machine',
898: 'water bottle',
899: 'water jug',
900: 'water tower',
901: 'whiskey jug',
902: 'whistle',
903: 'wig',
904: 'window screen',
905: 'window shade',
906: 'Windsor tie',
907: 'wine bottle',
908: 'wing',
909: 'wok',
910: 'wooden spoon',
911: 'wool, woolen, woollen',
912: 'worm fence, snake fence, snake-rail fence, Virginia fence',
913: 'wreck',
914: 'yawl',
915: 'yurt',
916: 'web site, website, internet site, site',
917: 'comic book',
918: 'crossword puzzle, crossword',
919: 'street sign',
920: 'traffic light, traffic signal, stoplight',
921: 'book jacket, dust cover, dust jacket, dust wrapper',
922: 'menu',
923: 'plate',
924: 'guacamole',
925: 'consomme',
926: 'hot pot, hotpot',
927: 'trifle',
928: 'ice cream, icecream',
929: 'ice lolly, lolly, lollipop, popsicle',
930: 'French loaf',
931: 'bagel, beigel',
932: 'pretzel',
933: 'cheeseburger',
934: 'hotdog, hot dog, red hot',
935: 'mashed potato',
936: 'head cabbage',
937: 'broccoli',
938: 'cauliflower',
939: 'zucchini, courgette',
940: 'spaghetti squash',
941: 'acorn squash',
942: 'butternut squash',
943: 'cucumber, cuke',
944: 'artichoke, globe artichoke',
945: 'bell pepper',
946: 'cardoon',
947: 'mushroom',
948: 'Granny Smith',
949: 'strawberry',
950: 'orange',
951: 'lemon',
952: 'fig',
953: 'pineapple, ananas',
954: 'banana',
955: 'jackfruit, jak, jack',
956: 'custard apple',
957: 'pomegranate',
958: 'hay',
959: 'carbonara',
960: 'chocolate sauce, chocolate syrup',
961: 'dough',
962: 'meat loaf, meatloaf',
963: 'pizza, pizza pie',
964: 'potpie',
965: 'burrito',
966: 'red wine',
967: 'espresso',
968: 'cup',
969: 'eggnog',
970: 'alp',
971: 'bubble',
972: 'cliff, drop, drop-off',
973: 'coral reef',
974: 'geyser',
975: 'lakeside, lakeshore',
976: 'promontory, headland, head, foreland',
977: 'sandbar, sand bar',
978: 'seashore, coast, seacoast, sea-coast',
979: 'valley, vale',
980: 'volcano',
981: 'ballplayer, baseball player',
982: 'groom, bridegroom',
983: 'scuba diver',
984: 'rapeseed',
985: 'daisy',
986: "yellow lady's slipper, yellow lady-slipper, Cypripedium calceolus, Cypripedium parviflorum",
987: 'corn',
988: 'acorn',
989: 'hip, rose hip, rosehip',
990: 'buckeye, horse chestnut, conker',
991: 'coral fungus',
992: 'agaric',
993: 'gyromitra',
994: 'stinkhorn, carrion fungus',
995: 'earthstar',
996: 'hen-of-the-woods, hen of the woods, Polyporus frondosus, Grifola frondosa',
997: 'bolete',
998: 'ear, spike, capitulum',
             999: 'toilet tissue, toilet paper, bathroom tissue'}
| astmt-master | fblib/util/db_info/imagenet_categ.py |
| astmt-master | fblib/util/db_info/__init__.py |
| astmt-master | fblib/util/model_resources/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
# ---- Public functions
def compute_gflops(net, in_shape=(1, 3, 224, 224), tasks=None):
net = add_flops_counting_methods(net)
net = net.cuda().train()
net.start_flops_count()
inputs = torch.rand(in_shape).requires_grad_().cuda()
if not tasks:
_ = net(inputs)
else:
_ = net.forward(inputs, tasks)
gflops = net.compute_average_flops_cost() / 1e9 / 2
return gflops
def add_flops_counting_methods(net_main_module):
"""Adds flops counting functions to an existing model. After that
the flops count should be activated and the model should be run on an input
image.
Example:
fcn = add_flops_counting_methods(fcn)
fcn = fcn.cuda().train()
fcn.start_flops_count()
_ = fcn(batch)
fcn.compute_average_flops_cost() / 1e9 / 2 # Result in GFLOPs per image in batch
Important: dividing by 2 only works for resnet models -- see below for the details
of flops computation.
Attention: we are counting multiply-add as two flops in this work, because in
most resnet models convolutions are bias-free (BN layers act as bias there)
    and it therefore makes sense to count multiply and add as separate flops.
This is why in the above example we divide by 2 in order to be consistent with
most modern benchmarks. For example in "Spatially Adaptive Computation Time for Residual
Networks" by Figurnov et al multiply-add was counted as two flops.
This module computes the average flops which is necessary for dynamic networks which
have different number of executed layers. For static networks it is enough to run the network
once and get statistics (above example).
Implementation:
The module works by adding batch_count to the main module which tracks the sum
of all batch sizes that were run through the network.
Also each convolutional layer of the network tracks the overall number of flops
performed.
The parameters are updated with the help of registered hook-functions which
are being called each time the respective layer is executed.
Parameters
----------
net_main_module : torch.nn.Module
Main module containing network
Returns
-------
net_main_module : torch.nn.Module
Updated main module with new methods/attributes that are used
to compute flops.
"""
# adding additional methods to the existing module object,
# this is done this way so that each function has access to self object
net_main_module.start_flops_count = start_flops_count.__get__(net_main_module)
net_main_module.stop_flops_count = stop_flops_count.__get__(net_main_module)
net_main_module.reset_flops_count = reset_flops_count.__get__(net_main_module)
net_main_module.compute_average_flops_cost = compute_average_flops_cost.__get__(net_main_module)
net_main_module.reset_flops_count()
    # Adding variables necessary for masked flops computation
net_main_module.apply(add_flops_mask_variable_or_reset)
return net_main_module
def compute_average_flops_cost(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Returns current mean flops consumption per image.
"""
batches_count = self.__batch_counter__
if batches_count == 0:
        print('Please divide manually by the batch size')
batches_count = 1
flops_sum = 0
for module in self.modules():
if is_supported_instance(module):
flops_sum += module.__flops__
return flops_sum / batches_count
def start_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Activates the computation of mean flops consumption per image.
Call it before you run the network.
"""
add_batch_counter_hook_function(self)
self.apply(add_flops_counter_hook_function)
def stop_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Stops computing the mean flops consumption per image.
Call whenever you want to pause the computation.
"""
remove_batch_counter_hook_function(self)
self.apply(remove_flops_counter_hook_function)
def reset_flops_count(self):
"""
A method that will be available after add_flops_counting_methods() is called
on a desired net object.
Resets statistics computed so far.
"""
add_batch_counter_variables_or_reset(self)
self.apply(add_flops_counter_variable_or_reset)
def add_flops_mask(module, mask):
def add_flops_mask_func(module):
if isinstance(module, torch.nn.Conv2d):
module.__mask__ = mask
module.apply(add_flops_mask_func)
def remove_flops_mask(module):
module.apply(add_flops_mask_variable_or_reset)
# ---- Internal functions
def is_supported_instance(module):
if isinstance(module, torch.nn.Conv2d) \
or isinstance(module, torch.nn.Linear) \
or isinstance(module, torch.nn.Upsample):
return True
return False
def empty_flops_counter_hook(module, input, output):
module.__flops__ += 0
def conv_flops_counter_hook(conv_module, input, output):
# Can have multiple inputs, getting the first one
input = input[0]
batch_size = input.shape[0]
output_height, output_width = output.shape[2:]
kernel_height, kernel_width = conv_module.kernel_size
in_channels = conv_module.in_channels
out_channels = conv_module.out_channels
# We count multiply-add as 2 flops
if conv_module.groups == 1:
# Normal convolution
conv_per_position_flops = 2 * kernel_height * kernel_width * in_channels * out_channels
else:
# Grouped convolution
d_in = in_channels // conv_module.groups
d_out = out_channels // conv_module.groups
conv_per_position_flops = 2 * kernel_height * kernel_width * d_in * d_out * conv_module.groups
active_elements_count = batch_size * output_height * output_width
if conv_module.__mask__ is not None:
# (b, 1, h, w)
flops_mask = conv_module.__mask__.expand(batch_size, 1, output_height, output_width)
active_elements_count = flops_mask.sum()
overall_conv_flops = conv_per_position_flops * active_elements_count
bias_flops = 0
if conv_module.bias is not None:
bias_flops = out_channels * active_elements_count
overall_flops = overall_conv_flops + bias_flops
conv_module.__flops__ += overall_flops
def upsample_flops_counter_hook(module, input, output):
output_size = output[0]
batch_size = output_size.shape[0]
output_elements_count = batch_size
for val in output_size.shape[1:]:
output_elements_count *= val
module.__flops__ += output_elements_count
def linear_flops_counter_hook(module, input, output):
input = input[0]
batch_size = input.shape[0]
module.__flops__ += batch_size * input.shape[1] * output.shape[1]
def batch_counter_hook(module, input, output):
# Can have multiple inputs, getting the first one
input = input[0]
batch_size = input.shape[0]
module.__batch_counter__ += batch_size
def add_batch_counter_variables_or_reset(module):
module.__batch_counter__ = 0
def add_batch_counter_hook_function(module):
if hasattr(module, '__batch_counter_handle__'):
return
handle = module.register_forward_hook(batch_counter_hook)
module.__batch_counter_handle__ = handle
def remove_batch_counter_hook_function(module):
if hasattr(module, '__batch_counter_handle__'):
module.__batch_counter_handle__.remove()
del module.__batch_counter_handle__
def add_flops_counter_variable_or_reset(module):
if is_supported_instance(module):
module.__flops__ = 0
def add_flops_counter_hook_function(module):
if is_supported_instance(module):
if hasattr(module, '__flops_handle__'):
return
if isinstance(module, torch.nn.Conv2d):
handle = module.register_forward_hook(conv_flops_counter_hook)
elif isinstance(module, torch.nn.Linear):
handle = module.register_forward_hook(linear_flops_counter_hook)
elif isinstance(module, torch.nn.Upsample):
handle = module.register_forward_hook(upsample_flops_counter_hook)
else:
handle = module.register_forward_hook(empty_flops_counter_hook)
module.__flops_handle__ = handle
def remove_flops_counter_hook_function(module):
if is_supported_instance(module):
if hasattr(module, '__flops_handle__'):
module.__flops_handle__.remove()
del module.__flops_handle__
# Also being run in the initialization
def add_flops_mask_variable_or_reset(module):
if is_supported_instance(module):
module.__mask__ = None
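# Hedged usage sketch (not part of the original file): measuring the GFLOPs of
# a torchvision model with compute_gflops. A CUDA device is required, since the
# helper moves the network and the input to the GPU.
if __name__ == '__main__':
    from torchvision import models
    if torch.cuda.is_available():
        print('ResNet-18: {:.2f} GFLOPs'.format(compute_gflops(models.resnet18())))
    else:
        print('CUDA not available; compute_gflops expects a GPU.')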
| astmt-master | fblib/util/model_resources/flops.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def count_parameters(model):
return sum(p.numel() for p in model.parameters() if p.requires_grad)
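# Hedged usage sketch (not part of the original file): counting the trainable
# parameters of a small module; nn.Linear(100, 1) has 100 weights + 1 bias.
if __name__ == '__main__':
    import torch.nn as nn
    print(count_parameters(nn.Linear(100, 1)))  # 101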
| astmt-master | fblib/util/model_resources/num_parameters.py |
| astmt-master | fblib/util/dense_predict/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def lr_poly(base_lr, iter_, max_iter=100, power=0.9):
return base_lr * ((1 - float(iter_) / max_iter) ** power)
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
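# Hedged usage sketch (not part of the original file): the polynomial decay
# schedule produced by lr_poly over a toy 100-iteration run.
if __name__ == '__main__':
    for it in (0, 50, 100):
        print('iter {:3d}: lr = {:.6f}'.format(it, lr_poly(0.01, it, max_iter=100)))
    # iter 0: 0.010000, iter 50: ~0.005359, iter 100: 0.000000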
| astmt-master | fblib/util/dense_predict/utils.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch
import torch.nn as nn
def traverse_graph(var):
"""
Args:
var: output Variable
"""
seen = set()
var_lst = []
def add_nodes(var):
if var not in seen:
if hasattr(var, 'variable'):
u = var.variable
if isinstance(u, nn.Parameter):
var_lst.append(u)
seen.add(var)
if hasattr(var, 'next_functions'):
for u in var.next_functions:
if u[0] is not None:
add_nodes(u[0])
# handle multiple outputs
if isinstance(var, tuple):
for v in var:
add_nodes(v.grad_fn)
else:
add_nodes(var.grad_fn)
return var_lst
def make_closure(loss, net):
def closure():
used_vars = traverse_graph(loss)
loss.backward()
for p in net.parameters():
exists = False
for v in used_vars:
exists = (p is v)
if exists:
break
if not exists:
p.grad = None
return loss
return closure
def make_closure_fast(loss, net):
def closure():
used_vars = set(traverse_graph(loss))
loss.backward()
for p in net.parameters():
if p not in used_vars:
p.grad = None
return loss
return closure
class MWENet(nn.Module):
def __init__(self):
super(MWENet, self).__init__()
self.a = nn.Parameter(torch.rand(1))
self.b = nn.Parameter(torch.rand(1))
self.c = nn.Parameter(torch.rand(1))
def forward_b(self, x):
x = self.a * x
x = x ** self.b
return x
def forward_c(self, x):
x = self.a * x
x = x ** self.c
return x
def print_params(self, txt='Before'):
print('{0}: a: {1:.7f}, b: {2:.7f}, c: {3:.7f}'.format(
txt, self.a[0].detach().numpy(), self.b[0].detach().numpy(), self.c[0].detach().numpy()))
def perform_first_iter(net, optimizer, x):
out_b = net.forward_b(x)
out_c = net.forward_c(x)
loss = (1 - out_b) + (2 - out_c)
optimizer.zero_grad()
loss.backward()
optimizer.step()
def test_default_optimizer():
print('\n Using default optimizer. All parameters should change')
x = torch.rand(1, requires_grad=True)
net = MWENet()
optimizer = torch.optim.SGD(net.parameters(), lr=0.01, momentum=0.99, weight_decay=0.001)
# First backward to get some momentum going
perform_first_iter(net, optimizer, x)
# Without modified optimizer
out_b = net.forward_b(x)
loss = (1 - out_b)
# Before
net.print_params()
optimizer.zero_grad()
loss.backward()
optimizer.step()
# After: c must get updated without being part of the graph
net.print_params('After ')
def test_modified_optimizer():
print('\n Using modified optimizer. parameter c should not change')
x = torch.rand(1, requires_grad=True)
net = MWENet()
optimizer = torch.optim.SGD(net.parameters(), lr=0.001, momentum=0.99, weight_decay=0.0001)
# First backward to get some momentum going
perform_first_iter(net, optimizer, x)
# With modified optimizer
out_b = net.forward_b(x)
loss = (1 - out_b)
# Before
net.print_params()
optimizer.zero_grad()
optimizer.step(closure=make_closure(loss, net))
# After: c SHOULD NOT get updated because it's not part of the graph
net.print_params('After ')
if __name__ == '__main__':
test_default_optimizer()
test_modified_optimizer()
| astmt-master | fblib/util/optimizer_mtl/select_used_modules.py |
| astmt-master | fblib/util/optimizer_mtl/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
from fblib.util.mypath import Path
import numpy as np
import torch.utils.data as data
import cv2
class FSVGTA(data.Dataset):
def __init__(self,
root=Path.db_root_dir('FSV'),
split='test',
mini=True,
transform=None,
retname=True,
overfit=False,
do_semseg=False,
do_albedo=False,
do_depth=False,
prune_rare_classes=True,
):
self.root = root
self.transform = transform
self.prune = []
if prune_rare_classes:
self.prune = [1, 4, 5, 6, 7]
self.split = split
self.retname = retname
# Original Images
self.im_ids = []
self.images = []
_image_dir = os.path.join(root, 'gta_' + split)
# Semantic segmentation
self.do_semseg = do_semseg
self.semsegs = []
# Albedo
self.do_albedo = do_albedo
self.albedos = []
# Depth
self.do_depth = do_depth
self.depths = []
# train/val/test splits are pre-cut
_splits_dir = os.path.join(root, 'gt_sets')
print("Initializing dataloader for FSV GTA {} set".format(self.split))
with open(os.path.join(os.path.join(_splits_dir, 'gta_' + self.split + '.txt')), "r") as f:
lines = f.read().splitlines()
if split == 'test' and mini:
            lines = lines[0:len(lines):max(1, int(len(lines) / 5000))]
for ii, line in enumerate(lines):
# Images
_image = os.path.join(_image_dir, line + "_final.webp")
# assert os.path.isfile(_image)
self.images.append(_image)
self.im_ids.append(line.rstrip('\n'))
# Semantic Segmentation
_semseg = os.path.join(_image_dir, line + "_object_id.png")
# assert os.path.isfile(_semseg)
self.semsegs.append(_semseg)
# Albedo
_albedo = os.path.join(_image_dir, line + "_albedo.webp")
# assert os.path.isfile(_albedo)
self.albedos.append(_albedo)
# Depth Estimation
_depth = os.path.join(_image_dir, line + "_disparity.webp")
# assert os.path.isfile(_depth)
self.depths.append(_depth)
if self.do_semseg:
assert (len(self.images) == len(self.semsegs))
if self.do_albedo:
assert (len(self.images) == len(self.albedos))
if self.do_depth:
assert (len(self.images) == len(self.depths))
        # Overfit to a small subset of images
if overfit:
n_of = 64
self.images = self.images[:n_of]
self.im_ids = self.im_ids[:n_of]
# Display stats
print('Number of dataset images: {:d}'.format(len(self.images)))
def __getitem__(self, index):
sample = {}
_img = self._load_img(index)
sample['image'] = _img
if self.do_semseg:
_semseg = self._load_semseg(index)
if _semseg.shape != _img.shape[:2]:
_semseg = cv2.resize(_semseg, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['semseg'] = _semseg
if self.do_albedo:
_albedo = self._load_albedo(index)
if _albedo.shape[:2] != _img.shape[:2]:
                _albedo = cv2.resize(_albedo, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['albedo'] = _albedo
if self.do_depth:
_depth = self._load_depth(index)
if _depth.shape[:2] != _img.shape[:2]:
_depth = cv2.resize(_depth, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['depth'] = _depth
if self.retname:
sample['meta'] = {'image': str(self.im_ids[index]),
'im_size': (_img.shape[0], _img.shape[1])}
if self.transform is not None:
sample = self.transform(sample)
return sample
def __len__(self):
return len(self.images)
def _load_img(self, index):
_img = cv2.imread(self.images[index])[:, :, ::-1].astype(np.float32)
return _img
def _load_semseg(self, index):
_semseg = cv2.imread(self.semsegs[index])[:, :, -1].astype(np.float32)
# Prune rare classes
if self.prune:
uniq = np.unique(_semseg)
for cls in self.prune:
if cls in uniq:
_semseg[_semseg == cls] = 0
_semseg = np.maximum(_semseg - 1, 0)
return _semseg
def _load_albedo(self, index):
_albedo = cv2.imread(self.albedos[index])[:, :, ::-1].astype(np.float32) / 255.
return _albedo
def _load_depth(self, index):
_depth = cv2.imread(self.depths[index])
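        # The disparity is packed across the three 8-bit image channels; recombine into one 24-bit value and rescale by 1/8192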
_depth = (_depth[:, :, 0] * 256 * 256 + _depth[:, :, 1] * 256 + _depth[:, :, 2]).astype(np.float32) / 8192
return _depth
def __str__(self):
return 'FSV GTA Multitask (split=' + str(self.split) + ')'
| astmt-master | fblib/dataloaders/fsv.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import os.path
from pycocotools.coco import COCO
import torch.utils.data as data
from PIL import Image
import numpy as np
from fblib.util.mypath import Path
class CocoCaptions(data.Dataset):
"""`MS Coco Captions <http://mscoco.org/dataset/#captions-challenge2015>`_ Dataset.
Args:
root (string): Root directory where images are downloaded to.
annFile (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.ToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
Example:
.. code:: python
import torchvision.datasets as dset
import torchvision.transforms as transforms
cap = dset.CocoCaptions(root = 'dir where images are',
annFile = 'json annotation file',
transform=transforms.ToTensor())
print('Number of samples: ', len(cap))
img, target = cap[3] # load 4th sample
print("Image Size: ", img.size())
print(target)
Output: ::
Number of samples: 82783
Image Size: (3L, 427L, 640L)
[u'A plane emitting smoke stream flying over a mountain.',
u'A plane darts across a bright blue sky behind a mountain covered in snow',
u'A plane leaves a contrail above the snowy mountain top.',
u'A mountain that has a plane flying overheard in the distance.',
u'A mountain view with a plume of smoke in the background']
"""
def __init__(self, root, annFile, transform=None, target_transform=None):
self.root = os.path.expanduser(root)
self.coco = COCO(annFile)
self.ids = list(self.coco.imgs.keys())
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: Tuple (image, target). target is a list of captions for the image.
"""
coco = self.coco
img_id = self.ids[index]
ann_ids = coco.getAnnIds(imgIds=img_id)
anns = coco.loadAnns(ann_ids)
target = [ann['caption'] for ann in anns]
path = coco.loadImgs(img_id)[0]['file_name']
img = Image.open(os.path.join(self.root, path)).convert('RGB')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.ids)
class CocoDetection(data.Dataset):
"""`MS Coco Captions <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.
Args:
root (string): Root directory where images are downloaded to.
annFile (string): Path to json annotation file.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.ToTensor``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
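    Example (illustrative sketch; root/annFile are placeholders):
        .. code:: python
            import torchvision.datasets as dset
            import torchvision.transforms as transforms
            det = dset.CocoDetection(root = 'dir where images are',
                                     annFile = 'json annotation file',
                                     transform=transforms.ToTensor())
            img, anns = det[3]  # anns is the list returned by coco.loadAnns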
"""
def __init__(self, root, annFile, transform=None, target_transform=None):
self.root = root
self.coco = COCO(annFile)
self.ids = list(self.coco.imgs.keys())
self.transform = transform
self.target_transform = target_transform
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``.
"""
coco = self.coco
img_id = self.ids[index]
ann_ids = coco.getAnnIds(imgIds=img_id)
target = coco.loadAnns(ann_ids)
path = coco.loadImgs(img_id)[0]['file_name']
img = Image.open(os.path.join(self.root, path)).convert('RGB')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self):
return len(self.ids)
class COCOSegmentation(data.Dataset):
"""`MS Coco Captions <http://mscoco.org/dataset/#detections-challenge2016>`_ Dataset.
Args:
split (string): Select split of the dataset, eg 'val2014' or 'train2014'
area_range (list): Select min and max size of the objects eg [500, float("inf")]
pascal_categories (boolean): Select only the categories of pascal
db_root (string): Root folder where the coco dataset is stored, folder containing annotation and images folders.
transform (callable, optional): A function/transform that takes in a sample
and returns a transformed version. E.g, ``transforms.ToTensor``
retname (boolean): Return metadata about the sample
"""
PASCAL_CAT_DICT = {'airplane': 1, 'bicycle': 2, 'bird': 3, 'boat': 4, 'bottle': 5,
'bus': 6, 'car': 7, 'cat': 8, 'chair': 9, 'cow': 10,
'dining table': 11, 'dog': 12, 'horse': 13, 'motorcycle': 14, 'person': 15,
'potted plant': 16, 'sheep': 17, 'couch': 18, 'train': 19, 'tv': 20}
def __init__(self,
split,
area_range=[],
only_pascal_categories=False,
mask_per_class=True,
db_root=Path.db_root_dir('COCO'),
n_samples=-1,
transform=None,
retname=True,
overfit=False
):
self.split = split
self.root = os.path.join(db_root, 'images', split)
annFile = os.path.join(db_root, 'annotations', 'instances_' + split + '.json')
self.coco = COCO(annFile)
self.pascal_cat_name = ['person', 'bird', 'cat', 'cow', 'dog', 'horse', 'sheep', 'airplane',
'bicycle', 'boat', 'bus', 'car', 'motorcycle', 'train', 'bottle', 'chair',
'dining table', 'potted plant', 'couch', 'tv']
self.only_pascal_categories = only_pascal_categories
if self.only_pascal_categories:
cat_ids = self.coco.getCatIds(catNms=self.pascal_cat_name)
else:
cat_ids = self.coco.getCatIds()
self.img_ids = list(self.coco.imgs.keys())
self.ids = self.coco.getAnnIds(imgIds=self.img_ids, areaRng=area_range, catIds=cat_ids)
self.transform = transform
self.area_range = area_range
self.cat_ids = cat_ids
self.mask_per_class = mask_per_class
self.retname = retname
if self.mask_per_class:
self._select_imgs()
if n_samples > 0:
if self.mask_per_class:
self.img_ids = list(self.img_ids)[:n_samples]
else:
self.ids = self.ids[:n_samples]
if overfit:
n_of = 64
self.img_ids = list(self.img_ids)[:n_of]
# Display stats
if self.mask_per_class:
print("Number of images: {:d}".format(len(self.img_ids)))
else:
print('Number of images: {:d}\nNumber of objects: {:d}'.format(len(self.coco.imgs), len(self.ids)))
def _select_imgs(self):
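        # Keep only images whose every annotation (for the selected categories) falls inside area_range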
lst = []
for x in self.img_ids:
ids_area = self.coco.getAnnIds(imgIds=x, areaRng=self.area_range, catIds=self.cat_ids)
ids = self.coco.getAnnIds(imgIds=x, areaRng=[0, float('Inf')], catIds=self.cat_ids)
if ids_area and len(ids) == len(ids_area):
lst.append(x)
self.img_ids = lst
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: Tuple (image, target). target is the object returned by ``coco.loadAnns``.
"""
coco = self.coco
if self.mask_per_class:
img_id = self.img_ids[index]
ann_meta = []
for cat_id in self.cat_ids:
ids = coco.getAnnIds(imgIds=img_id, catIds=cat_id)
ann_meta.append(coco.loadAnns(ids))
cat_id = self.cat_ids
else:
ann_meta = coco.loadAnns(self.ids[index])
img_id = ann_meta[0]["image_id"]
cat_id = ann_meta[0]['category_id']
img_meta = coco.loadImgs(img_id)[0]
path = img_meta['file_name']
sample = {}
if self.retname:
sample['meta'] = {'image': str(path).split('.')[0],
'object': str(self.ids[index]),
'category': cat_id,
'im_size': (img_meta['height'], img_meta['width'])}
try:
img = np.array(Image.open(os.path.join(self.root, path)).convert('RGB')).astype(np.float32)
if self.mask_per_class:
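                # Build a single semantic label map; later categories overwrite earlier ones where masks overlap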
target = np.zeros([img.shape[0], img.shape[1]])
for ii in range(len(cat_id)):
ann_meta_class = ann_meta[ii]
target_tmp = np.zeros([img.shape[0], img.shape[1]])
for ann in ann_meta_class:
target_tmp = np.logical_or(target_tmp > 0, np.array(coco.annToMask(ann)) > 0)
if self.only_pascal_categories:
coco_cat_name = self.coco.cats[self.cat_ids[ii]]['name']
if coco_cat_name in self.pascal_cat_name:
target[target_tmp > 0] = self.PASCAL_CAT_DICT[coco_cat_name]
else:
target[target_tmp > 0] = ii + 1
else:
target = np.zeros([img.shape[0], img.shape[1], 1])
for ann in ann_meta:
target = np.logical_or(target, np.array(coco.annToMask(ann).reshape([img.shape[0], img.shape[1], 1])))
target = target.astype(np.float32)
except ValueError:
img = np.zeros((100, 100, 3))
target = np.zeros((100, 100))
print('Error reading image ' + str(path) + ' with object id ' + str(self.ids[index]))
sample['image'] = img
if self.mask_per_class:
sample['semseg'] = target
else:
sample['gt'] = target
if self.transform is not None:
sample = self.transform(sample)
return sample
def __len__(self):
if self.mask_per_class:
return len(self.img_ids)
else:
return len(self.ids)
def __str__(self):
return 'COCOSegmentation(split='+str(self.split)+', area_range='+str(self.area_range) + ')'
if __name__ == "__main__":
from matplotlib.pyplot import imshow, show
import torchvision.transforms as transforms
import fblib.dataloaders.custom_transforms as tr
transform = transforms.Compose([tr.ToTensor()])
dataset = COCOSegmentation(split='val2017', transform=None, retname=True,
area_range=[1000, float("inf")], only_pascal_categories=True, overfit=True)
for i in range(len(dataset)):
sample = dataset[i]
imshow(sample['semseg'])
show()
| astmt-master | fblib/dataloaders/coco.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import torch.utils.data as data
class CombineIMDBs(data.Dataset):
"""
    Combine multiple datasets, for example to create a joint VOC and SBD training set
"""
def __init__(self, dataloaders, excluded=None, repeat=None):
self.dataloaders = dataloaders
self.excluded = excluded
self.im_ids = []
# Combine object lists
for dl in dataloaders:
for elem in dl.im_ids:
if elem not in self.im_ids:
self.im_ids.append(elem)
# Exclude
if excluded:
for dl in excluded:
for elem in dl.im_ids:
if elem in self.im_ids:
self.im_ids.remove(elem)
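        # 'repeat' optionally holds a per-dataset duplication factor, used to oversample smaller datasets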
if repeat:
self.repeat = repeat
assert(len(repeat) == len(dataloaders))
else:
self.repeat = [1] * len(dataloaders)
# Get object pointers
self.im_list = []
new_im_ids = []
num_images = 0
for ii, dl in enumerate(dataloaders):
for jj, curr_im_id in enumerate(dl.im_ids):
if (curr_im_id in self.im_ids) and (curr_im_id not in new_im_ids):
for r in range(self.repeat[ii]):
new_im_ids.append(curr_im_id)
self.im_list.append({'db_ii': ii, 'im_ii': jj})
num_images += 1
self.im_ids = new_im_ids
print('Combined number of images: {:d}\n'.format(num_images))
def __getitem__(self, index):
_db_ii = self.im_list[index]["db_ii"]
_im_ii = self.im_list[index]['im_ii']
# print("db_id: {}, im_id: {}".format(_db_ii, _im_ii))
sample = self.dataloaders[_db_ii].__getitem__(_im_ii)
if 'meta' in sample.keys():
sample['meta']['db'] = str(self.dataloaders[_db_ii])
return sample
def __len__(self):
return len(self.im_ids)
def __str__(self):
include_db = [str(db) for db in self.dataloaders]
exclude_db = [str(db) for db in self.excluded]
return 'Included datasets:'+str(include_db)+'\n'+'Excluded datasets:'+str(exclude_db)
if __name__ == '__main__':
from matplotlib import pyplot as plt
import fblib.dataloaders as dataloaders
pascal_train = dataloaders.VOC12(split='train', do_semseg=True)
sbd = dataloaders.SBD(split=['train', 'val'], do_semseg=True)
pascal_val = dataloaders.VOC12(split='val', do_semseg=True)
dataset = CombineIMDBs([pascal_train, sbd], excluded=[pascal_val])
for i, sample in enumerate(dataset):
plt.imshow(sample['image']/255.)
plt.show()
plt.imshow(sample['semseg'])
plt.show()
| astmt-master | fblib/dataloaders/combine_im_dbs.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import sys
import tarfile
import cv2
import numpy as np
import torch.utils.data as data
from six.moves import urllib
from fblib.util.mypath import Path
class MSRA(data.Dataset):
"""
MSRA10k dataset for Saliency Estimation
"""
URL = 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL/MSRA10K.tgz'
FILE = 'MSRA10K.tgz'
def __init__(self,
root=Path.db_root_dir('MSRA10K'),
download=True,
split='trainval',
transform=None,
retname=True,
overfit=False):
if download:
self._download()
self.transform = transform
self.retname = retname
self.root = root
self.gt_dir = os.path.join(self.root, 'gt')
self.image_dir = os.path.join(self.root, 'Imgs')
_splits_dir = os.path.join(self.root, 'gt_sets')
self.split = split
if isinstance(self.split, str):
self.split = [self.split]
self.images = []
self.gts = []
self.im_ids = []
for sp in self.split:
with open(os.path.join(os.path.join(_splits_dir, sp + '.txt')), "r") as f:
lines = f.readlines()
for line in lines:
line = line.strip()
_image = os.path.join(self.image_dir, line + ".jpg")
_gt = os.path.join(self.gt_dir, line + ".png")
assert os.path.isfile(_image)
assert os.path.isfile(_gt)
self.im_ids.append(line)
self.images.append(_image)
self.gts.append(_gt)
assert (len(self.images) == len(self.gts) == len(self.im_ids))
if overfit:
n_of = 64
self.images = self.images[:n_of]
self.im_ids = self.im_ids[:n_of]
# Display stats
print('Number of images: {:d}'.format(len(self.im_ids)))
def __getitem__(self, index):
sample = {}
_img = self._load_img(index)
sample['image'] = _img
_sal = self._load_sal(index)
sample['sal'] = _sal
if self.retname:
sample['meta'] = {'image': str(self.im_ids[index]),
'im_size': (_img.shape[0], _img.shape[1])}
if self.transform is not None:
sample = self.transform(sample)
return sample
def __len__(self):
return len(self.im_ids)
def _load_img(self, index):
# Read Image
_img = cv2.imread(self.images[index])[:, :, ::-1].astype(np.float32)
return _img
def _load_sal(self, index):
# Read Target object
_gt = cv2.imread(self.gts[index], flags=0).astype(np.float32) / 255.
return _gt
def _download(self):
_fpath = os.path.join(Path.db_root_dir(), self.FILE)
if os.path.isfile(_fpath):
print('Files already downloaded')
return
else:
print('Downloading ' + self.URL + ' to ' + _fpath)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> %s %.1f%%' %
(_fpath, float(count * block_size) /
float(total_size) * 100.0))
sys.stdout.flush()
urllib.request.urlretrieve(self.URL, _fpath, _progress)
# extract file
cwd = os.getcwd()
print('\nExtracting tar file')
tar = tarfile.open(_fpath)
os.chdir(Path.db_root_dir())
tar.extractall()
tar.close()
os.chdir(cwd)
print('Done!')
def __str__(self):
return 'MSRA(split=' + str(self.split) + ')'
if __name__ == '__main__':
from matplotlib import pyplot as plt
dataset = MSRA()
for i, sample in enumerate(dataset):
plt.imshow(sample['image']/255)
plt.show()
plt.imshow(sample['sal'])
plt.show()
| astmt-master | fblib/dataloaders/msra10k.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import sys
import tarfile
import json
import cv2
import numpy as np
import scipy.io as sio
import torch.utils.data as data
from PIL import Image
from skimage.morphology import thin
from six.moves import urllib
from fblib import PROJECT_ROOT_DIR
from fblib.util.mypath import Path
class PASCALContext(data.Dataset):
"""
PASCAL-Context dataset, for multiple tasks
Included tasks:
1. Edge detection,
2. Semantic Segmentation,
3. Human Part Segmentation,
4. Surface Normal prediction (distilled),
5. Saliency (distilled)
"""
URL = 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL/PASCAL_MT.tgz'
FILE = 'PASCAL_MT.tgz'
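    # Mapping from the 24 raw human-part names to merged labels; the outer key is the number of part classes kept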
HUMAN_PART = {1: {'hair': 1, 'head': 1, 'lear': 1, 'lebrow': 1, 'leye': 1, 'lfoot': 1,
'lhand': 1, 'llarm': 1, 'llleg': 1, 'luarm': 1, 'luleg': 1, 'mouth': 1,
'neck': 1, 'nose': 1, 'rear': 1, 'rebrow': 1, 'reye': 1, 'rfoot': 1,
'rhand': 1, 'rlarm': 1, 'rlleg': 1, 'ruarm': 1, 'ruleg': 1, 'torso': 1},
4: {'hair': 1, 'head': 1, 'lear': 1, 'lebrow': 1, 'leye': 1, 'lfoot': 4,
'lhand': 3, 'llarm': 3, 'llleg': 4, 'luarm': 3, 'luleg': 4, 'mouth': 1,
'neck': 2, 'nose': 1, 'rear': 1, 'rebrow': 1, 'reye': 1, 'rfoot': 4,
'rhand': 3, 'rlarm': 3, 'rlleg': 4, 'ruarm': 3, 'ruleg': 4, 'torso': 2},
6: {'hair': 1, 'head': 1, 'lear': 1, 'lebrow': 1, 'leye': 1, 'lfoot': 6,
'lhand': 4, 'llarm': 4, 'llleg': 6, 'luarm': 3, 'luleg': 5, 'mouth': 1,
'neck': 2, 'nose': 1, 'rear': 1, 'rebrow': 1, 'reye': 1, 'rfoot': 6,
'rhand': 4, 'rlarm': 4, 'rlleg': 6, 'ruarm': 3, 'ruleg': 5, 'torso': 2},
14: {'hair': 1, 'head': 1, 'lear': 1, 'lebrow': 1, 'leye': 1, 'lfoot': 14,
'lhand': 8, 'llarm': 7, 'llleg': 13, 'luarm': 6, 'luleg': 12, 'mouth': 1,
'neck': 2, 'nose': 1, 'rear': 1, 'rebrow': 1, 'reye': 1, 'rfoot': 11,
'rhand': 5, 'rlarm': 4, 'rlleg': 10, 'ruarm': 3, 'ruleg': 9, 'torso': 2}
}
VOC_CATEGORY_NAMES = ['background',
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
CONTEXT_CATEGORY_LABELS = [0,
2, 23, 25, 31, 34,
45, 59, 65, 72, 98,
397, 113, 207, 258, 284,
308, 347, 368, 416, 427]
def __init__(self,
root=Path.db_root_dir('PASCAL_MT'),
download=True,
split='val',
transform=None,
area_thres=0,
retname=True,
overfit=False,
do_edge=True,
do_human_parts=False,
do_semseg=False,
do_normals=False,
do_sal=False,
num_human_parts=6,
):
self.root = root
if download:
self._download()
image_dir = os.path.join(self.root, 'JPEGImages')
self.transform = transform
if isinstance(split, str):
self.split = [split]
else:
split.sort()
self.split = split
self.area_thres = area_thres
self.retname = retname
# Edge Detection
self.do_edge = do_edge
self.edges = []
edge_gt_dir = os.path.join(self.root, 'pascal-context', 'trainval')
# Semantic Segmentation
self.do_semseg = do_semseg
self.semsegs = []
# Human Part Segmentation
self.do_human_parts = do_human_parts
part_gt_dir = os.path.join(self.root, 'human_parts')
self.parts = []
self.human_parts_category = 15
self.cat_part = json.load(open(os.path.join(os.path.dirname(__file__),
'../util/db_info/pascal_part.json'), 'r'))
self.cat_part["15"] = self.HUMAN_PART[num_human_parts]
        self.parts_file = os.path.join(self.root, 'ImageSets', 'Parts', ''.join(self.split) + '.txt')
# Surface Normal Estimation
self.do_normals = do_normals
_normal_gt_dir = os.path.join(self.root, 'normals_distill')
self.normals = []
if self.do_normals:
with open(os.path.join(PROJECT_ROOT_DIR, 'util/db_info/nyu_classes.json')) as f:
cls_nyu = json.load(f)
with open(os.path.join(PROJECT_ROOT_DIR, 'util/db_info/context_classes.json')) as f:
cls_context = json.load(f)
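            # Distilled normals are only trusted on classes shared between NYUD and PASCAL-Context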
self.normals_valid_classes = []
for cl_nyu in cls_nyu:
if cl_nyu in cls_context and cl_nyu != 'unknown':
self.normals_valid_classes.append(cls_context[cl_nyu])
# Custom additions due to incompatibilities
self.normals_valid_classes.append(cls_context['tvmonitor'])
# Saliency
self.do_sal = do_sal
_sal_gt_dir = os.path.join(self.root, 'sal_distill')
self.sals = []
# train/val/test splits are pre-cut
_splits_dir = os.path.join(self.root, 'ImageSets', 'Context')
self.im_ids = []
self.images = []
print("Initializing dataloader for PASCAL {} set".format(''.join(self.split)))
for splt in self.split:
            with open(os.path.join(_splits_dir, splt + '.txt'), "r") as f:
lines = f.read().splitlines()
for ii, line in enumerate(lines):
# Images
_image = os.path.join(image_dir, line + ".jpg")
assert os.path.isfile(_image)
self.images.append(_image)
self.im_ids.append(line.rstrip('\n'))
# Edges
_edge = os.path.join(edge_gt_dir, line + ".mat")
assert os.path.isfile(_edge)
self.edges.append(_edge)
# Semantic Segmentation
_semseg = self._get_semseg_fname(line)
assert os.path.isfile(_semseg)
self.semsegs.append(_semseg)
# Human Parts
_human_part = os.path.join(self.root, part_gt_dir, line + ".mat")
assert os.path.isfile(_human_part)
self.parts.append(_human_part)
_normal = os.path.join(self.root, _normal_gt_dir, line + ".png")
assert os.path.isfile(_normal)
self.normals.append(_normal)
_sal = os.path.join(self.root, _sal_gt_dir, line + ".png")
assert os.path.isfile(_sal)
self.sals.append(_sal)
if self.do_edge:
assert (len(self.images) == len(self.edges))
if self.do_human_parts:
assert (len(self.images) == len(self.parts))
if self.do_semseg:
assert (len(self.images) == len(self.semsegs))
if self.do_normals:
assert (len(self.images) == len(self.normals))
if self.do_sal:
assert (len(self.images) == len(self.sals))
if not self._check_preprocess_parts():
            print('Pre-processing PASCAL dataset for human parts; this may take a while, but it only runs once.')
self._preprocess_parts()
if self.do_human_parts:
# Find images which have human parts
self.has_human_parts = []
for ii in range(len(self.im_ids)):
if self.human_parts_category in self.part_obj_dict[self.im_ids[ii]]:
self.has_human_parts.append(1)
else:
self.has_human_parts.append(0)
# If the other tasks are disabled, select only the images that contain human parts, to allow batching
if not self.do_edge and not self.do_semseg and not self.do_sal and not self.do_normals:
print('Ignoring images that do not contain human parts')
for i in range(len(self.parts) - 1, -1, -1):
if self.has_human_parts[i] == 0:
del self.im_ids[i]
del self.images[i]
del self.parts[i]
del self.has_human_parts[i]
print('Number of images with human parts: {:d}'.format(np.sum(self.has_human_parts)))
# Overfit to n_of images
if overfit:
n_of = 64
self.images = self.images[:n_of]
self.im_ids = self.im_ids[:n_of]
if self.do_edge:
self.edges = self.edges[:n_of]
if self.do_semseg:
self.semsegs = self.semsegs[:n_of]
if self.do_human_parts:
self.parts = self.parts[:n_of]
if self.do_normals:
self.normals = self.normals[:n_of]
if self.do_sal:
self.sals = self.sals[:n_of]
# Display stats
print('Number of dataset images: {:d}'.format(len(self.images)))
def __getitem__(self, index):
sample = {}
_img = self._load_img(index)
sample['image'] = _img
if self.do_edge:
_edge = self._load_edge(index)
if _edge.shape != _img.shape[:2]:
_edge = cv2.resize(_edge, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['edge'] = _edge
if self.do_human_parts:
_human_parts, _ = self._load_human_parts(index)
if _human_parts.shape != _img.shape[:2]:
_human_parts = cv2.resize(_human_parts, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['human_parts'] = _human_parts
if self.do_semseg:
_semseg = self._load_semseg(index)
if _semseg.shape != _img.shape[:2]:
_semseg = cv2.resize(_semseg, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['semseg'] = _semseg
if self.do_normals:
_normals = self._load_normals_distilled(index)
if _normals.shape[:2] != _img.shape[:2]:
_normals = cv2.resize(_normals, _img.shape[:2][::-1], interpolation=cv2.INTER_CUBIC)
sample['normals'] = _normals
if self.do_sal:
_sal = self._load_sal_distilled(index)
if _sal.shape[:2] != _img.shape[:2]:
_sal = cv2.resize(_sal, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['sal'] = _sal
if self.retname:
sample['meta'] = {'image': str(self.im_ids[index]),
'im_size': (_img.shape[0], _img.shape[1])}
if self.transform is not None:
sample = self.transform(sample)
return sample
def __len__(self):
return len(self.images)
def _load_img(self, index):
_img = np.array(Image.open(self.images[index]).convert('RGB')).astype(np.float32)
return _img
def _load_edge(self, index):
# Read Target object
_tmp = sio.loadmat(self.edges[index])
_edge = cv2.Laplacian(_tmp['LabelMap'], cv2.CV_64F)
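        # The Laplacian is non-zero at label transitions; binarize and thin to 1-pixel-wide edges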
_edge = thin(np.abs(_edge) > 0).astype(np.float32)
return _edge
def _load_human_parts(self, index):
if self.has_human_parts[index]:
# Read Target object
_part_mat = sio.loadmat(self.parts[index])['anno'][0][0][1][0]
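            # Each entry of _part_mat is one object: [1] category id, [2] instance mask, [3] part annotations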
_inst_mask = _target = None
for _obj_ii in range(len(_part_mat)):
has_human = _part_mat[_obj_ii][1][0][0] == self.human_parts_category
has_parts = len(_part_mat[_obj_ii][3]) != 0
if has_human and has_parts:
if _inst_mask is None:
_inst_mask = _part_mat[_obj_ii][2].astype(np.float32)
_target = np.zeros(_inst_mask.shape)
else:
_inst_mask = np.maximum(_inst_mask, _part_mat[_obj_ii][2].astype(np.float32))
n_parts = len(_part_mat[_obj_ii][3][0])
for part_i in range(n_parts):
cat_part = str(_part_mat[_obj_ii][3][0][part_i][0][0])
mask_id = self.cat_part[str(self.human_parts_category)][cat_part]
mask = _part_mat[_obj_ii][3][0][part_i][1].astype(bool)
_target[mask] = mask_id
if _target is not None:
_target, _inst_mask = _target.astype(np.float32), _inst_mask.astype(np.float32)
else:
_target, _inst_mask = np.zeros((512, 512), dtype=np.float32), np.zeros((512, 512), dtype=np.float32)
return _target, _inst_mask
else:
return np.zeros((512, 512), dtype=np.float32), np.zeros((512, 512), dtype=np.float32)
def _load_semseg(self, index):
_semseg = np.array(Image.open(self.semsegs[index])).astype(np.float32)
return _semseg
def _load_normals_distilled(self, index):
_tmp = np.array(Image.open(self.normals[index])).astype(np.float32)
_tmp = 2.0 * _tmp / 255.0 - 1.0
labels = sio.loadmat(os.path.join(self.root, 'pascal-context', 'trainval', self.im_ids[index] + '.mat'))
labels = labels['LabelMap']
        _normals = np.zeros(_tmp.shape, dtype=np.float32)
for x in np.unique(labels):
if x in self.normals_valid_classes:
_normals[labels == x, :] = _tmp[labels == x, :]
return _normals
def _load_sal_distilled(self, index):
_sal = np.array(Image.open(self.sals[index])).astype(np.float32) / 255.
_sal = (_sal > 0.5).astype(np.float32)
return _sal
def _get_semseg_fname(self, fname):
fname_voc = os.path.join(self.root, 'semseg', 'VOC12', fname + '.png')
fname_context = os.path.join(self.root, 'semseg', 'pascal-context', fname + '.png')
if os.path.isfile(fname_voc):
seg = fname_voc
elif os.path.isfile(fname_context):
seg = fname_context
else:
seg = None
print('Segmentation for im: {} was not found'.format(fname))
return seg
def _check_preprocess_parts(self):
_obj_list_file = self.parts_file
if not os.path.isfile(_obj_list_file):
return False
else:
self.part_obj_dict = json.load(open(_obj_list_file, 'r'))
return list(np.sort([str(x) for x in self.part_obj_dict.keys()])) == list(np.sort(self.im_ids))
def _preprocess_parts(self):
self.part_obj_dict = {}
obj_counter = 0
for ii in range(len(self.im_ids)):
# Read object masks and get number of objects
if ii % 100 == 0:
print("Processing image: {}".format(ii))
part_mat = sio.loadmat(
os.path.join(self.root, 'human_parts', '{}.mat'.format(self.im_ids[ii])))
n_obj = len(part_mat['anno'][0][0][1][0])
# Get the categories from these objects
_cat_ids = []
for jj in range(n_obj):
obj_area = np.sum(part_mat['anno'][0][0][1][0][jj][2])
obj_cat = int(part_mat['anno'][0][0][1][0][jj][1])
if obj_area > self.area_thres:
                    _cat_ids.append(obj_cat)
else:
_cat_ids.append(-1)
obj_counter += 1
self.part_obj_dict[self.im_ids[ii]] = _cat_ids
with open(self.parts_file, 'w') as outfile:
outfile.write('{{\n\t"{:s}": {:s}'.format(self.im_ids[0], json.dumps(self.part_obj_dict[self.im_ids[0]])))
for ii in range(1, len(self.im_ids)):
outfile.write(
',\n\t"{:s}": {:s}'.format(self.im_ids[ii], json.dumps(self.part_obj_dict[self.im_ids[ii]])))
outfile.write('\n}\n')
print('Preprocessing for parts finished')
def _download(self):
_fpath = os.path.join(Path.db_root_dir(), self.FILE)
if os.path.isfile(_fpath):
print('Files already downloaded')
return
else:
print('Downloading ' + self.URL + ' to ' + _fpath)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> %s %.1f%%' %
(_fpath, float(count * block_size) /
float(total_size) * 100.0))
sys.stdout.flush()
urllib.request.urlretrieve(self.URL, _fpath, _progress)
# extract file
cwd = os.getcwd()
print('\nExtracting tar file')
tar = tarfile.open(_fpath)
os.chdir(Path.db_root_dir())
tar.extractall()
tar.close()
os.chdir(cwd)
print('Done!')
def __str__(self):
return 'PASCAL_MT(split=' + str(self.split) + ')'
def test_all():
import matplotlib.pyplot as plt
import torch
import fblib.dataloaders.custom_transforms as tr
from torchvision import transforms
from fblib.util.custom_collate import collate_mil
transform = transforms.Compose([tr.RandomHorizontalFlip(),
tr.ScaleNRotate(rots=(-90, 90), scales=(1., 1.),
flagvals={'image': cv2.INTER_CUBIC,
'edge': cv2.INTER_NEAREST,
'semseg': cv2.INTER_NEAREST,
'human_parts': cv2.INTER_NEAREST,
'normals': cv2.INTER_CUBIC,
'sal': cv2.INTER_NEAREST}),
tr.FixedResize(resolutions={'image': (512, 512),
'edge': (512, 512),
'semseg': (512, 512),
'human_parts': (512, 512),
'normals': (512, 512),
'sal': (512, 512)},
flagvals={'image': cv2.INTER_CUBIC,
'edge': cv2.INTER_NEAREST,
'semseg': cv2.INTER_NEAREST,
'human_parts': cv2.INTER_NEAREST,
'normals': cv2.INTER_CUBIC,
'sal': cv2.INTER_NEAREST}),
tr.AddIgnoreRegions(),
tr.ToTensor()])
dataset = PASCALContext(split='train', transform=transform, retname=True,
do_edge=True,
do_semseg=True,
do_human_parts=True,
do_normals=True,
do_sal=True)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=2, shuffle=False, num_workers=0)
for i, sample in enumerate(dataloader):
print(i)
for j in range(sample['image'].shape[0]):
f, ax_arr = plt.subplots(2, 3)
for k in range(len(ax_arr)):
for l in range(len(ax_arr[k])):
ax_arr[k][l].cla()
ax_arr[0][0].set_title('Input Image')
ax_arr[0][0].imshow(np.transpose(sample['image'][j], (1, 2, 0))/255.)
ax_arr[0][1].set_title('Edge')
ax_arr[0][1].imshow(np.transpose(sample['edge'][j], (1, 2, 0))[:, :, 0])
ax_arr[0][2].set_title('Semantic Segmentation')
ax_arr[0][2].imshow(np.transpose(sample['semseg'][j], (1, 2, 0))[:, :, 0] / 20.)
ax_arr[1][0].set_title('Human Part Segmentation')
ax_arr[1][0].imshow(np.transpose(sample['human_parts'][j], (1, 2, 0))[:, :, 0] / 6.)
ax_arr[1][1].set_title('Surface Normals')
ax_arr[1][1].imshow(np.transpose(sample['normals'][j], (1, 2, 0)))
ax_arr[1][2].set_title('Saliency')
ax_arr[1][2].imshow(np.transpose(sample['sal'][j], (1, 2, 0))[:, :, 0])
plt.show()
break
if __name__ == '__main__':
test_all()
| astmt-master | fblib/dataloaders/pascal_context.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import sys
import tarfile
import cv2
from PIL import Image
import numpy as np
import torch.utils.data as data
import scipy.io as sio
from six.moves import urllib
from fblib.util.mypath import Path
class NYUD_MT(data.Dataset):
"""
NYUD dataset for multi-task learning.
Includes edge detection, semantic segmentation, surface normals, and depth prediction
"""
URL = 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL/NYUD_MT.tgz'
FILE = 'NYUD_MT.tgz'
def __init__(self,
root=Path.db_root_dir('NYUD_MT'),
download=True,
split='val',
transform=None,
retname=True,
overfit=False,
do_edge=True,
do_semseg=False,
do_normals=False,
do_depth=False,
):
self.root = root
if download:
self._download()
self.transform = transform
if isinstance(split, str):
self.split = [split]
else:
split.sort()
self.split = split
self.retname = retname
# Original Images
self.im_ids = []
self.images = []
_image_dir = os.path.join(root, 'images')
# Edge Detection
self.do_edge = do_edge
self.edges = []
_edge_gt_dir = os.path.join(root, 'edge')
# Semantic segmentation
self.do_semseg = do_semseg
self.semsegs = []
_semseg_gt_dir = os.path.join(root, 'segmentation')
# Surface Normals
self.do_normals = do_normals
self.normals = []
_normal_gt_dir = os.path.join(root, 'normals')
# Depth
self.do_depth = do_depth
self.depths = []
_depth_gt_dir = os.path.join(root, 'depth')
# train/val/test splits are pre-cut
_splits_dir = os.path.join(root, 'gt_sets')
print('Initializing dataloader for NYUD {} set'.format(''.join(self.split)))
for splt in self.split:
            with open(os.path.join(_splits_dir, splt + '.txt'), 'r') as f:
lines = f.read().splitlines()
for ii, line in enumerate(lines):
# Images
_image = os.path.join(_image_dir, line + '.jpg')
assert os.path.isfile(_image)
self.images.append(_image)
self.im_ids.append(line.rstrip('\n'))
# Edges
_edge = os.path.join(self.root, _edge_gt_dir, line + '.png')
assert os.path.isfile(_edge)
self.edges.append(_edge)
# Semantic Segmentation
_semseg = os.path.join(self.root, _semseg_gt_dir, line + '.mat')
assert os.path.isfile(_semseg)
self.semsegs.append(_semseg)
_normal = os.path.join(self.root, _normal_gt_dir, line + '.jpg')
assert os.path.isfile(_normal)
self.normals.append(_normal)
_depth = os.path.join(self.root, _depth_gt_dir, line + '.mat')
assert os.path.isfile(_depth)
self.depths.append(_depth)
if self.do_edge:
assert (len(self.images) == len(self.edges))
if self.do_semseg:
assert (len(self.images) == len(self.semsegs))
if self.do_normals:
assert (len(self.images) == len(self.normals))
if self.do_depth:
assert (len(self.images) == len(self.depths))
        # Overfit to n_of images
if overfit:
n_of = 64
self.images = self.images[:n_of]
self.im_ids = self.im_ids[:n_of]
# Display stats
print('Number of dataset images: {:d}'.format(len(self.images)))
def __getitem__(self, index):
sample = {}
_img = self._load_img(index)
sample['image'] = _img
if self.do_edge:
_edge = self._load_edge(index)
if _edge.shape != _img.shape[:2]:
_edge = cv2.resize(_edge, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['edge'] = _edge
if self.do_semseg:
_semseg = self._load_semseg(index)
if _semseg.shape != _img.shape[:2]:
_semseg = cv2.resize(_semseg, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['semseg'] = _semseg
if self.do_normals:
_normals = self._load_normals(index)
if _normals.shape[:2] != _img.shape[:2]:
_normals = cv2.resize(_normals, _img.shape[:2][::-1], interpolation=cv2.INTER_CUBIC)
sample['normals'] = _normals
if self.do_depth:
_depth = self._load_depth(index)
if _depth.shape[:2] != _img.shape[:2]:
_depth = cv2.resize(_depth, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['depth'] = _depth
if self.retname:
sample['meta'] = {'image': str(self.im_ids[index]),
'im_size': (_img.shape[0], _img.shape[1])}
if self.transform is not None:
sample = self.transform(sample)
return sample
def __len__(self):
return len(self.images)
def _load_img(self, index):
_img = np.array(Image.open(self.images[index]).convert('RGB')).astype(np.float32)
return _img
def _load_edge(self, index):
_edge = np.array(Image.open(self.edges[index])).astype(np.float32) / 255.
return _edge
def _load_semseg(self, index):
# Note: Related works are ignoring the background class (40-way classification), such as:
# _semseg = np.array(sio.loadmat(self.semsegs[index])['segmentation']).astype(np.float32) - 1
# _semseg[_semseg == -1] = 255
# However, all experiments of ASTMT were conducted by using 41-way classification:
_semseg = np.array(sio.loadmat(self.semsegs[index])['segmentation']).astype(np.float32)
return _semseg
def _load_normals(self, index):
_tmp = np.array(Image.open(self.normals[index])).astype(np.float32)
_normals = 2.0 * _tmp / 255.0 - 1.0
return _normals
def _load_depth(self, index):
_depth = np.array(sio.loadmat(self.depths[index])['depth']).astype(np.float32)
return _depth
def _download(self):
_fpath = os.path.join(Path.db_root_dir(), self.FILE)
if os.path.isfile(_fpath):
print('Files already downloaded')
return
else:
print('Downloading ' + self.URL + ' to ' + _fpath)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> %s %.1f%%' %
(_fpath, float(count * block_size) /
float(total_size) * 100.0))
sys.stdout.flush()
urllib.request.urlretrieve(self.URL, _fpath, _progress)
# extract file
cwd = os.getcwd()
print('\nExtracting tar file')
tar = tarfile.open(_fpath)
os.chdir(Path.db_root_dir())
tar.extractall()
tar.close()
os.chdir(cwd)
print('Done!')
def __str__(self):
return 'NYUD Multitask (split=' + str(self.split) + ')'
class NYUDRaw(data.Dataset):
"""
NYUD dataset for Surface Normal and Depth Estimation using NYUD raw data.
"""
def __init__(self,
root=Path.db_root_dir('NYUD_raw'),
split='train',
transform=None,
do_normals=True,
do_depth=False,
retname=True,
overfit=False,
):
self.root = root
self.transform = transform
self.split = split
self.retname = retname
self.do_normals = do_normals
self.do_depth = do_depth
# Original Images
self.im_ids = []
self.images = []
_image_dir = os.path.join(root, self.split, 'images')
_mask_gt_dir = os.path.join(root, self.split, 'masks')
# Surface Normals
self.normals = []
nrm_ext = '.png' if self.split == 'train' else '.jpg'
self.masks = []
_normal_gt_dir = os.path.join(root, self.split, 'normals')
# Monocular depth
self.depths = []
_depth_gt_dir = os.path.join(root, self.split, 'depth')
# train/val/test splits are pre-cut
_splits_dir = os.path.join(root, 'gt_sets')
print('Initializing dataloader for NYUD Raw, {} set'.format(self.split))
        with open(os.path.join(_splits_dir, self.split + '.txt'), 'r') as f:
lines = f.read().splitlines()
for ii, line in enumerate(lines):
# Images
_image = os.path.join(_image_dir, line + '.jpg')
assert os.path.isfile(_image)
self.images.append(_image)
self.im_ids.append(line.rstrip('\n'))
if self.do_normals:
# Normals
_normal = os.path.join(self.root, _normal_gt_dir, line + nrm_ext)
assert os.path.isfile(_normal)
self.normals.append(_normal)
if self.do_depth:
# Depth
_depth = os.path.join(self.root, _depth_gt_dir, line + '.mat')
assert os.path.isfile(_depth)
self.depths.append(_depth)
if self.split == 'train':
# Masks (only available for train data)
_mask = os.path.join(self.root, _mask_gt_dir, line + '.png')
assert os.path.isfile(_mask)
self.masks.append(_mask)
if self.do_normals:
assert(len(self.images) == len(self.normals))
if self.do_depth:
assert(len(self.images) == len(self.depths))
if self.split == 'train':
assert(len(self.images) == len(self.masks))
        # Overfit to n_of images
if overfit:
n_of = 64
self.images = self.images[:n_of]
self.im_ids = self.im_ids[:n_of]
        # Display stats
        print('Number of dataset images: {:d}'.format(len(self.images)))
def __getitem__(self, index):
sample = {}
_img = self._load_img(index)
sample['image'] = _img
if self.do_normals:
_normals = self._load_normals(index)
sample['normals'] = _normals
if self.do_depth:
_depth = self._load_depth(index)
sample['depth'] = _depth
if self.retname:
sample['meta'] = {'image': str(self.im_ids[index]),
'im_size': (_img.shape[0], _img.shape[1])}
if self.transform is not None:
sample = self.transform(sample)
return sample
def _load_img(self, index):
_img = cv2.imread(self.images[index])[:, :, ::-1].astype(np.float32)
return _img
def _load_normals(self, index):
_tmp = cv2.imread(self.normals[index])[:, :, ::-1].astype(np.float32)
_normals = 2.0 * _tmp / 255.0 - 1.0
if self.split == 'train':
_mask = cv2.imread(self.masks[index], 0)
_normals[_mask == 0, :] = 0
return _normals
def _load_depth(self, index):
_depth = np.array(sio.loadmat(self.depths[index])['depth']).astype(np.float32)
if self.split == 'train':
_mask = cv2.imread(self.masks[index], 0)
_depth[_mask == 0] = 0
return _depth
def __len__(self):
return len(self.images)
def __str__(self):
        return 'NYUD-v2 Raw(split=' + str(self.split) + ')'
def test_mt():
transform = transforms.Compose([tr.RandomHorizontalFlip(),
tr.ScaleNRotate(rots=(-90, 90), scales=(1., 1.),
flagvals={'image': cv2.INTER_CUBIC,
'edge': cv2.INTER_NEAREST,
'semseg': cv2.INTER_NEAREST,
'normals': cv2.INTER_LINEAR,
'depth': cv2.INTER_LINEAR}),
tr.FixedResize(resolutions={'image': (512, 512),
'edge': (512, 512),
'semseg': (512, 512),
'normals': (512, 512),
'depth': (512, 512)},
flagvals={'image': cv2.INTER_CUBIC,
'edge': cv2.INTER_NEAREST,
'semseg': cv2.INTER_NEAREST,
'normals': cv2.INTER_LINEAR,
'depth': cv2.INTER_LINEAR}),
tr.AddIgnoreRegions(),
tr.ToTensor()])
dataset = NYUD_MT(split='train', transform=transform, retname=True,
do_edge=True,
do_semseg=True,
do_normals=True,
do_depth=True)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=5, shuffle=False, num_workers=5)
for i, sample in enumerate(dataloader):
imshow(sample['image'][0, 0])
show()
imshow(sample['edge'][0, 0])
show()
imshow(sample['semseg'][0, 0])
show()
imshow(sample['normals'][0, 0])
show()
imshow(sample['depth'][0, 0])
show()
if __name__ == '__main__':
from matplotlib.pyplot import imshow, show
import torch
import fblib.dataloaders.custom_transforms as tr
from torchvision import transforms
test_mt()
| astmt-master | fblib/dataloaders/nyud.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import numpy.random as random
import numpy as np
import torch
import cv2
import math
import fblib.util.helpers as helpers
class ScaleNRotate(object):
"""Scale (zoom-in, zoom-out) and Rotate the image and the ground truth.
Args:
two possibilities:
1. rots (tuple): (minimum, maximum) rotation angle
scales (tuple): (minimum, maximum) scale
2. rots [list]: list of fixed possible rotation angles
scales [list]: list of fixed possible scales
"""
def __init__(self, rots=(-30, 30), scales=(.75, 1.25), semseg=False, flagvals=None):
assert (isinstance(rots, type(scales)))
self.rots = rots
self.scales = scales
self.semseg = semseg
self.flagvals = flagvals
def __call__(self, sample):
if type(self.rots) == tuple:
# Continuous range of scales and rotations
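            # Note: this sampling assumes the rotation range is symmetric around 0 and the scale range around 1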
rot = (self.rots[1] - self.rots[0]) * random.random() - \
(self.rots[1] - self.rots[0])/2
sc = (self.scales[1] - self.scales[0]) * random.random() - \
(self.scales[1] - self.scales[0]) / 2 + 1
elif type(self.rots) == list:
# Fixed range of scales and rotations
rot = self.rots[random.randint(0, len(self.rots))]
sc = self.scales[random.randint(0, len(self.scales))]
for elem in sample.keys():
if 'meta' in elem:
continue
tmp = sample[elem]
h, w = tmp.shape[:2]
center = (w / 2, h / 2)
            assert center != (0, 0)  # Guard against a degenerate rotation center (strange warpAffine behaviour)
M = cv2.getRotationMatrix2D(center, rot, sc)
if self.flagvals is None:
if ((tmp == 0) | (tmp == 1)).all():
flagval = cv2.INTER_NEAREST
elif 'gt' in elem and self.semseg:
flagval = cv2.INTER_NEAREST
else:
flagval = cv2.INTER_CUBIC
else:
flagval = self.flagvals[elem]
if elem == 'normals':
# Rotate Normals properly
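                # Decompose the (x, y) components into in-plane angle and magnitude,
                # add the image rotation to the angle, and leave the z component untouched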
in_plane = np.arctan2(tmp[:, :, 0], tmp[:, :, 1])
nrm_0 = np.sqrt(tmp[:, :, 0] ** 2 + tmp[:, :, 1] ** 2)
                rot_rad = rot * 2 * math.pi / 360
tmp[:, :, 0] = np.sin(in_plane + rot_rad) * nrm_0
tmp[:, :, 1] = np.cos(in_plane + rot_rad) * nrm_0
tmp = cv2.warpAffine(tmp, M, (w, h), flags=flagval)
sample[elem] = tmp
return sample
def __str__(self):
return 'ScaleNRotate:(rot='+str(self.rots)+',scale='+str(self.scales)+')'
class FixedResize(object):
"""Resize the image and the ground truth to specified resolution.
Args:
resolutions (dict): the list of resolutions
"""
def __init__(self, resolutions=None, flagvals=None):
self.resolutions = resolutions
self.flagvals = flagvals
if self.flagvals is not None:
assert(len(self.resolutions) == len(self.flagvals))
def __call__(self, sample):
# Fixed range of scales
if self.resolutions is None:
return sample
elems = list(sample.keys())
for elem in elems:
if 'meta' in elem or 'bbox' in elem:
continue
if elem in self.resolutions:
if self.resolutions[elem] is None:
continue
if isinstance(sample[elem], list):
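                    # A list of crops is stacked into one array, with the crop index as the last axis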
if sample[elem][0].ndim == 3:
output_size = np.append(self.resolutions[elem], [3, len(sample[elem])])
else:
output_size = np.append(self.resolutions[elem], len(sample[elem]))
tmp = sample[elem]
sample[elem] = np.zeros(output_size, dtype=np.float32)
for ii, crop in enumerate(tmp):
if self.flagvals is None:
sample[elem][..., ii] = helpers.fixed_resize(crop, self.resolutions[elem])
else:
sample[elem][..., ii] = helpers.fixed_resize(crop, self.resolutions[elem], flagval=self.flagvals[elem])
else:
if self.flagvals is None:
sample[elem] = helpers.fixed_resize(sample[elem], self.resolutions[elem])
else:
sample[elem] = helpers.fixed_resize(sample[elem], self.resolutions[elem], flagval=self.flagvals[elem])
if elem == 'normals':
N1, N2, N3 = sample[elem][:, :, 0], sample[elem][:, :, 1], sample[elem][:, :, 2]
Nn = np.sqrt(N1 ** 2 + N2 ** 2 + N3 ** 2) + np.finfo(np.float32).eps
sample[elem][:, :, 0], sample[elem][:, :, 1], sample[elem][:, :, 2] = N1/Nn, N2/Nn, N3/Nn
else:
del sample[elem]
return sample
def __str__(self):
return 'FixedResize:'+str(self.resolutions)
class RandomResize(object):
"""Randomly resize the image and the ground truth to specified scales.
Args:
scales (list): the list of scales
"""
def __init__(self, scales=[0.5, 0.8, 1]):
self.scales = scales
def __call__(self, sample):
# Fixed range of scales
sc = self.scales[random.randint(0, len(self.scales))]
for elem in sample.keys():
if 'meta' in elem or 'bbox' in elem:
continue
tmp = sample[elem]
if ((tmp == 0) | (tmp == 1)).all():
flagval = cv2.INTER_NEAREST
else:
flagval = cv2.INTER_CUBIC
tmp = cv2.resize(tmp, None, fx=sc, fy=sc, interpolation=flagval)
sample[elem] = tmp
return sample
def __str__(self):
return 'RandomResize:'+str(self.scales)
class FixedResizeRatio(object):
"""Fixed resize for the image and the ground truth to specified scale.
Args:
        scale (float): the scale
"""
def __init__(self, scale=None, flagvals=None):
self.scale = scale
self.flagvals = flagvals
def __call__(self, sample):
for elem in sample.keys():
if 'meta' in elem:
continue
if elem in self.flagvals:
if self.flagvals[elem] is None:
continue
tmp = sample[elem]
tmp = cv2.resize(tmp, None, fx=self.scale, fy=self.scale, interpolation=self.flagvals[elem])
sample[elem] = tmp
return sample
def __str__(self):
return 'FixedResizeRatio: '+str(self.scale)
class RandomHorizontalFlip(object):
"""Horizontally flip the given image and ground truth randomly with a probability of 0.5."""
def __call__(self, sample):
if random.random() < 0.5:
for elem in sample.keys():
if 'meta' in elem:
continue
else:
tmp = sample[elem]
tmp = cv2.flip(tmp, flipCode=1)
sample[elem] = tmp
if elem == 'normals':
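                        # Mirror the x component of surface normals to match the horizontal flip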
sample[elem][:, :, 0] *= -1
return sample
def __str__(self):
return 'RandomHorizontalFlip'
class NormalizeImage(object):
"""
Return the given elements between 0 and 1
"""
def __init__(self, norm_elem='image', clip=False):
self.norm_elem = norm_elem
self.clip = clip
def __call__(self, sample):
if isinstance(self.norm_elem, tuple):
for elem in self.norm_elem:
if np.max(sample[elem]) > 1:
sample[elem] /= 255.0
else:
if self.clip:
sample[self.norm_elem] = np.clip(sample[self.norm_elem], 0, 255)
if np.max(sample[self.norm_elem]) > 1:
sample[self.norm_elem] /= 255.0
return sample
def __str__(self):
return 'NormalizeImage'
class ToImage(object):
"""
Return the given elements between 0 and 255
"""
def __init__(self, norm_elem='image', custom_max=255.):
self.norm_elem = norm_elem
self.custom_max = custom_max
def __call__(self, sample):
if isinstance(self.norm_elem, tuple):
for elem in self.norm_elem:
tmp = sample[elem]
sample[elem] = self.custom_max * (tmp - tmp.min()) / (tmp.max() - tmp.min() + 1e-10)
else:
tmp = sample[self.norm_elem]
sample[self.norm_elem] = self.custom_max * (tmp - tmp.min()) / (tmp.max() - tmp.min() + 1e-10)
return sample
def __str__(self):
        return 'ToImage'
class AddIgnoreRegions(object):
"""Add Ignore Regions"""
def __call__(self, sample):
for elem in sample.keys():
tmp = sample[elem]
if elem == 'normals':
# Check areas with norm 0
Nn = np.sqrt(tmp[:, :, 0] ** 2 + tmp[:, :, 1] ** 2 + tmp[:, :, 2] ** 2)
tmp[Nn == 0, :] = 255.
sample[elem] = tmp
elif elem == 'human_parts':
# Check for images without human part annotations
if (tmp == 0).all():
tmp = 255 * np.ones(tmp.shape, dtype=tmp.dtype)
sample[elem] = tmp
elif elem == 'depth':
tmp[tmp == 0] = 255.
sample[elem] = tmp
return sample
class ToTensor(object):
"""Convert ndarrays in sample to Tensors."""
def __call__(self, sample):
for elem in sample.keys():
if 'meta' in elem:
continue
elif 'bbox' in elem:
tmp = sample[elem]
sample[elem] = torch.from_numpy(tmp)
continue
tmp = sample[elem]
if tmp.ndim == 2:
tmp = tmp[:, :, np.newaxis]
# swap color axis because
# numpy image: H x W x C
# torch image: C X H X W
tmp = tmp.transpose((2, 0, 1))
sample[elem] = torch.from_numpy(tmp.astype(np.float32))
return sample
def __str__(self):
return 'ToTensor'
| astmt-master | fblib/dataloaders/custom_transforms.py |
from .bsds import BSDS500
from .coco import COCOSegmentation
from .fsv import FSVGTA
from .nyud import NYUD_MT, NYUDRaw
from .pascal_context import PASCALContext
from .pascal_voc import VOC12
from .sbd import SBD
from .msra10k import MSRA
from .pascal_sal import PASCALS
__all__ = ['BSDS500', 'COCOSegmentation', 'FSVGTA', 'NYUD_MT',
           'NYUDRaw', 'PASCALContext', 'VOC12', 'SBD', 'MSRA', 'PASCALS']
| astmt-master | fblib/dataloaders/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import sys
import errno
import cv2
import hashlib
import tarfile
import numpy as np
import scipy.io as sio
import torch.utils.data as data
from PIL import Image
from six.moves import urllib
from fblib.util.mypath import Path
class SBD(data.Dataset):
URL = 'http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz'
FILE = 'benchmark.tgz'
MD5 = '82b4d87ceb2ed10f6038a1cba92111cb'
BASE_DIR = 'benchmark_RELEASE/dataset'
VOC_CATEGORY_NAMES = ['background',
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
def __init__(self,
root=Path.db_root_dir('PASCAL'),
download=True,
split='val',
transform=None,
retname=True,
do_semseg=True,
overfit=False,
):
self.root = root
_sbd_root = os.path.join(self.root, self.BASE_DIR)
_inst_dir = os.path.join(_sbd_root, 'inst')
_cat_dir = os.path.join(_sbd_root, 'cls')
_image_dir = os.path.join(_sbd_root, 'img')
if download:
self._download()
self.transform = transform
if isinstance(split, str):
self.split = [split]
else:
split.sort()
self.split = split
self.retname = retname
self.do_semseg = do_semseg
if self.do_semseg:
self.semsegs = []
# train/val/test splits are pre-cut
        _splits_dir = _sbd_root
self.im_ids = []
self.images = []
print("Initializing dataloader for SBD {} set".format(''.join(self.split)))
for splt in self.split:
            with open(os.path.join(_splits_dir, splt + '.txt'), "r") as f:
lines = f.read().splitlines()
for ii, line in enumerate(lines):
# Images
_image = os.path.join(_image_dir, line + ".jpg")
assert os.path.isfile(_image)
self.images.append(_image)
self.im_ids.append(line.rstrip('\n'))
# Semantic Segmentation
if self.do_semseg:
_semseg = os.path.join(_cat_dir, line + '.mat')
assert os.path.isfile(_semseg)
self.semsegs.append(_semseg)
if self.do_semseg:
assert (len(self.images) == len(self.semsegs))
        # Overfit to n_of images
if overfit:
n_of = 32
self.im_ids = self.im_ids[:n_of]
self.images = self.images[:n_of]
if self.do_semseg:
self.semsegs = self.semsegs[:n_of]
# Display stats
print('Number of dataset images: {:d}'.format(len(self.images)))
def __getitem__(self, index):
sample = {}
_img = self._load_img(index)
sample['image'] = _img
if self.do_semseg:
_semseg = self._load_semseg(index)
if _semseg is not None:
if _semseg.shape != _img.shape[:2]:
_semseg = cv2.resize(_semseg, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['semseg'] = _semseg
if self.retname:
sample['meta'] = {'image': str(self.im_ids[index]),
'im_size': (_img.shape[0], _img.shape[1])}
if self.transform is not None:
sample = self.transform(sample)
return sample
def __len__(self):
return len(self.images)
def _load_img(self, index):
_img = np.array(Image.open(self.images[index]).convert('RGB')).astype(np.float32)
return _img
def _load_semseg(self, index):
_semseg = sio.loadmat(self.semsegs[index])['GTcls'][0][0][1]
_semseg = np.array(_semseg).astype(np.float32)
return _semseg
def _check_integrity(self):
_fpath = os.path.join(self.root, self.FILE)
if not os.path.isfile(_fpath):
print("{} does not exist".format(_fpath))
return False
_md5c = hashlib.md5(open(_fpath, 'rb').read()).hexdigest()
if _md5c != self.MD5:
print(" MD5({}) did not match MD5({}) expected for {}".format(
_md5c, self.MD5, _fpath))
return False
return True
def _download(self):
_fpath = os.path.join(self.root, self.FILE)
try:
os.makedirs(self.root)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
if self._check_integrity():
print('Files already downloaded and verified')
return
else:
print('Downloading ' + self.URL + ' to ' + _fpath)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> %s %.1f%%' %
(_fpath, float(count * block_size) /
float(total_size) * 100.0))
sys.stdout.flush()
urllib.request.urlretrieve(self.URL, _fpath, _progress)
# extract file
cwd = os.getcwd()
print('\nExtracting the tar file')
tar = tarfile.open(_fpath)
os.chdir(self.root)
tar.extractall()
tar.close()
os.chdir(cwd)
print('Done!')
def get_img_size(self, idx=0):
        img = Image.open(self.images[idx])
return list(reversed(img.size))
def __str__(self):
return 'SBD(split=' + str(self.split) + ')'
if __name__ == '__main__':
from matplotlib import pyplot as plt
dataset = SBD(split=['train', 'val'], retname=True, do_semseg=True)
for i, sample in enumerate(dataset):
plt.imshow(sample['image']/255.)
plt.show()
plt.imshow(sample['semseg'])
plt.show()
| astmt-master | fblib/dataloaders/sbd.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
from __future__ import print_function
import torch.utils.data as data
from PIL import Image
import os
import os.path
import errno
import numpy as np
import torch
import codecs
import random
from fblib.util.mypath import Path
class MNIST(data.Dataset):
"""`MNIST <http://yann.lecun.com/exdb/mnist/>`_ Dataset.
Args:
root (string): Root directory of dataset where ``processed/training.pt``
and ``processed/test.pt`` exist.
train (bool, optional): If True, creates dataset from ``training.pt``,
otherwise from ``test.pt``.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
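        multitask (bool, optional): If True, ``__getitem__`` additionally returns two
            binary labels derived from the digit class (digit >= 5, digit is odd).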
"""
urls = [
'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz',
'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz',
]
raw_folder = 'raw'
processed_folder = 'processed'
training_file = 'training.pt'
test_file = 'test.pt'
def __init__(self, root=Path.db_root_dir('MNIST'), train=True, transform=None, target_transform=None, download=False,
multitask=False):
self.root = os.path.expanduser(root)
self.transform = transform
self.target_transform = target_transform
self.train = train # training set or test set
self.multitask = multitask
if download:
self.download()
if not self._check_exists():
raise RuntimeError('Dataset not found.' +
' You can use download=True to download it')
if self.train:
self.train_data, self.train_labels = torch.load(
os.path.join(self.root, self.processed_folder, self.training_file))
else:
self.test_data, self.test_labels = torch.load(
os.path.join(self.root, self.processed_folder, self.test_file))
if multitask:
self._process_labels()
def __getitem__(self, index):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
if not self.multitask:
if self.train:
img, target = self.train_data[index], self.train_labels[index]
else:
img, target = self.test_data[index], self.test_labels[index]
else:
if self.train:
img, target, orig = self.train_data[index], self.train_labels_multitask[index], self.train_labels[index]
else:
img, target, orig = self.test_data[index], self.test_labels_multitask[index], self.test_labels[index]
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img.numpy(), mode='L')
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
if not self.multitask:
return img, target
else:
return img, target, orig
def __len__(self):
if self.train:
return len(self.train_data)
else:
return len(self.test_data)
def _process_labels(self):
elem = self.train_labels if self.train else self.test_labels
        temp = [[0] * 2 for _ in range(len(elem))]
for i in range(len(elem)):
# Create two conflicting tasks
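            # e.g. digit 7 -> [1, 1] (>= 5 and odd), digit 4 -> [0, 0]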
if elem[i] >= 5:
temp[i][0] = 1
if elem[i] % 2 == 1:
temp[i][1] = 1
if self.train:
self.train_labels_multitask = temp
else:
self.test_labels_multitask = temp
def _check_exists(self):
return os.path.exists(os.path.join(self.root, self.processed_folder, self.training_file)) and \
os.path.exists(os.path.join(self.root, self.processed_folder, self.test_file))
def download(self):
"""Download the MNIST data if it doesn't exist in processed_folder already."""
from six.moves import urllib
import gzip
if self._check_exists():
return
# download files
try:
os.makedirs(os.path.join(self.root, self.raw_folder))
os.makedirs(os.path.join(self.root, self.processed_folder))
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
for url in self.urls:
print('Downloading ' + url)
data = urllib.request.urlopen(url)
filename = url.rpartition('/')[2]
file_path = os.path.join(self.root, self.raw_folder, filename)
with open(file_path, 'wb') as f:
f.write(data.read())
with open(file_path.replace('.gz', ''), 'wb') as out_f, \
gzip.GzipFile(file_path) as zip_f:
out_f.write(zip_f.read())
os.unlink(file_path)
# process and save as torch files
print('Processing...')
training_set = (
read_image_file(os.path.join(self.root, self.raw_folder, 'train-images-idx3-ubyte')),
read_label_file(os.path.join(self.root, self.raw_folder, 'train-labels-idx1-ubyte'))
)
test_set = (
read_image_file(os.path.join(self.root, self.raw_folder, 't10k-images-idx3-ubyte')),
read_label_file(os.path.join(self.root, self.raw_folder, 't10k-labels-idx1-ubyte'))
)
with open(os.path.join(self.root, self.processed_folder, self.training_file), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.root, self.processed_folder, self.test_file), 'wb') as f:
torch.save(test_set, f)
print('Done!')
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
        tmp = 'train' if self.train else 'test'
fmt_str += ' Split: {}\n'.format(tmp)
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
class FashionMNIST(MNIST):
"""`Fashion-MNIST <https://github.com/zalandoresearch/fashion-mnist>`_ Dataset.
Args:
root (string): Root directory of dataset where ``processed/training.pt``
and ``processed/test.pt`` exist.
train (bool, optional): If True, creates dataset from ``training.pt``,
otherwise from ``test.pt``.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
"""
urls = [
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/train-labels-idx1-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-images-idx3-ubyte.gz',
'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/t10k-labels-idx1-ubyte.gz',
]
class EMNIST(MNIST):
"""`EMNIST <https://www.nist.gov/itl/iad/image-group/emnist-dataset/>`_ Dataset.
Args:
root (string): Root directory of dataset where ``processed/training.pt``
and ``processed/test.pt`` exist.
split (string): The dataset has 6 different splits: ``byclass``, ``bymerge``,
``balanced``, ``letters``, ``digits`` and ``mnist``. This argument specifies
which one to use.
train (bool, optional): If True, creates dataset from ``training.pt``,
otherwise from ``test.pt``.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
"""
url = 'http://www.itl.nist.gov/iaui/vip/cs_links/EMNIST/gzip.zip'
splits = ('byclass', 'bymerge', 'balanced', 'letters', 'digits', 'mnist')
def __init__(self, root, split, **kwargs):
if split not in self.splits:
raise ValueError('Split "{}" not found. Valid splits are: {}'.format(
split, ', '.join(self.splits),
))
self.split = split
self.training_file = self._training_file(split)
self.test_file = self._test_file(split)
super(EMNIST, self).__init__(root, **kwargs)
def _training_file(self, split):
return 'training_{}.pt'.format(split)
def _test_file(self, split):
return 'test_{}.pt'.format(split)
def download(self):
"""Download the EMNIST data if it doesn't exist in processed_folder already."""
from six.moves import urllib
import gzip
import shutil
import zipfile
if self._check_exists():
return
# download files
try:
os.makedirs(os.path.join(self.root, self.raw_folder))
os.makedirs(os.path.join(self.root, self.processed_folder))
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
print('Downloading ' + self.url)
data = urllib.request.urlopen(self.url)
filename = self.url.rpartition('/')[2]
raw_folder = os.path.join(self.root, self.raw_folder)
file_path = os.path.join(raw_folder, filename)
with open(file_path, 'wb') as f:
f.write(data.read())
print('Extracting zip archive')
with zipfile.ZipFile(file_path) as zip_f:
zip_f.extractall(raw_folder)
os.unlink(file_path)
gzip_folder = os.path.join(raw_folder, 'gzip')
for gzip_file in os.listdir(gzip_folder):
if gzip_file.endswith('.gz'):
print('Extracting ' + gzip_file)
with open(os.path.join(raw_folder, gzip_file.replace('.gz', '')), 'wb') as out_f, \
gzip.GzipFile(os.path.join(gzip_folder, gzip_file)) as zip_f:
out_f.write(zip_f.read())
shutil.rmtree(gzip_folder)
# process and save as torch files
for split in self.splits:
print('Processing ' + split)
training_set = (
read_image_file(os.path.join(raw_folder, 'emnist-{}-train-images-idx3-ubyte'.format(split))),
read_label_file(os.path.join(raw_folder, 'emnist-{}-train-labels-idx1-ubyte'.format(split)))
)
test_set = (
read_image_file(os.path.join(raw_folder, 'emnist-{}-test-images-idx3-ubyte'.format(split))),
read_label_file(os.path.join(raw_folder, 'emnist-{}-test-labels-idx1-ubyte'.format(split)))
)
with open(os.path.join(self.root, self.processed_folder, self._training_file(split)), 'wb') as f:
torch.save(training_set, f)
with open(os.path.join(self.root, self.processed_folder, self._test_file(split)), 'wb') as f:
torch.save(test_set, f)
print('Done!')
class MultiMNIST(MNIST):
def __init__(self, root=Path.db_root_dir('MNIST'), train=True, transform=None, target_transform=None, download=False):
super(MultiMNIST, self).__init__(root, train, transform, target_transform, download, multitask=False)
def __getitem__(self, index1):
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
if self.train:
image_lst = self.train_data
target_lst = self.train_labels
else:
image_lst = self.test_data
target_lst = self.test_labels
if self.train:
index2 = random.randint(0, self.__len__() - 1)
else:
index2 = self.__len__() - 1 - index1
img1, target1 = image_lst[index1], target_lst[index1]
img2, target2 = image_lst[index2], target_lst[index2]
shift = 2
img = torch.zeros(img1.size())
img[:28-shift, :28-shift] = img1[shift:, shift:]
img[shift:, shift:] = torch.max(img[shift:, shift:], img2[:28-shift, :28-shift].float())
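        # The two digits are overlaid with a small diagonal offset (shift px)
        # and merged with an element-wise max, giving one image with two labels.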
# doing this so that it is consistent with all other datasets
# to return a PIL Image
img = Image.fromarray(img.numpy().astype(np.uint8), mode='L')
if self.transform is not None:
img = self.transform(img)
return img, target1, target2
def get_int(b):
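    # interpret a byte string as a big-endian unsigned integer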
return int(codecs.encode(b, 'hex'), 16)
def read_label_file(path):
with open(path, 'rb') as f:
data = f.read()
assert get_int(data[:4]) == 2049
length = get_int(data[4:8])
parsed = np.frombuffer(data, dtype=np.uint8, offset=8)
return torch.from_numpy(parsed).view(length).long()
def read_image_file(path):
with open(path, 'rb') as f:
data = f.read()
assert get_int(data[:4]) == 2051
length = get_int(data[4:8])
num_rows = get_int(data[8:12])
num_cols = get_int(data[12:16])
parsed = np.frombuffer(data, dtype=np.uint8, offset=16)
return torch.from_numpy(parsed).view(length, num_rows, num_cols)
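# Note: the readers above assume the standard big-endian IDX layout: a 4-byte
# magic number (2049 for labels, 2051 for images), a 4-byte item count, and for
# images two further 4-byte fields with row/column counts before the uint8 payload.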
if __name__ == '__main__':
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from matplotlib import pyplot as plt
trans = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5,), (1.0,))])
db_train = MultiMNIST(train=True, transform=trans, download=True)
db_test = MultiMNIST(train=False, transform=trans, download=True)
trainloader = DataLoader(db_train, batch_size=64, shuffle=True, num_workers=2, pin_memory=True)
for ii, (img, label1, label2) in enumerate(trainloader):
plt.imshow(img[0, 0, :, :])
plt.show()
| astmt-master | fblib/dataloaders/mnist_multitask.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import sys
import tarfile
from PIL import Image
import numpy as np
from glob import glob
import scipy.io as sio
import torch.utils.data as data
from six.moves import urllib
from fblib.util.mypath import Path
class BSDS500(data.Dataset):
"""
BSDS500 datasets for edge detection.
"""
URL = 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL/BSDS500.tgz'
FILE = 'BSDS500.tgz'
def __init__(self,
root=Path.db_root_dir('BSDS500'),
download=True,
split=['train', 'val'],
transform=None,
retname=True,
n_votes=1,
overfit=False):
if download:
self._download()
self.transform = transform
self.retname = retname
self.n_votes = n_votes
self.root = root
self.gt_dir = os.path.join(self.root, 'data', 'groundTruth')
self.image_dir = os.path.join(self.root, 'data', 'images')
_splits_dir = os.path.join(self.root, 'lists')
        if not os.path.exists(_splits_dir):
            os.mkdir(_splits_dir)
self.split = split
self._get_images_trainval()
if isinstance(self.split, str):
self.split = [self.split]
self.images = []
self.gts = []
self.im_ids = []
for sp in self.split:
            with open(os.path.join(_splits_dir, sp + '.txt'), "r") as f:
lines = f.readlines()
for line in lines:
line = line.strip()
_image = os.path.join(self.image_dir, sp, line + ".jpg")
_gt = os.path.join(self.gt_dir, sp, line + ".mat")
assert os.path.isfile(_image)
assert os.path.isfile(_gt)
self.im_ids.append(line)
self.images.append(_image)
self.gts.append(_gt)
assert (len(self.images) == len(self.gts) == len(self.im_ids))
if overfit:
n_of = 16
self.images = self.images[:n_of]
self.gts = self.gts[:n_of]
self.im_ids = self.im_ids[:n_of]
# Display stats
print('Number of images: {:d}'.format(len(self.im_ids)))
def __getitem__(self, index):
sample = {}
_img = self._load_img(index)
sample['image'] = _img
_edge = self._load_edge(index)
sample['edge'] = _edge
if self.retname:
sample['meta'] = {'image': str(self.im_ids[index]),
'im_size': (_img.shape[0], _img.shape[1])}
if self.transform is not None:
sample = self.transform(sample)
return sample
def __len__(self):
return len(self.im_ids)
def _get_images_trainval(self):
for sp in self.split:
if os.path.isfile(os.path.join(self.root, 'lists', sp + '.txt')):
continue
img_list = glob(os.path.join(self.gt_dir, sp, '*.mat'))
            img_list = sorted([os.path.splitext(os.path.basename(x))[0] for x in img_list])
split_f = os.path.join(self.root, 'lists', sp + '.txt')
with open(split_f, 'w') as f:
for img in img_list:
assert os.path.isfile(os.path.join(self.image_dir, sp, img + '.jpg'))
f.write('{}\n'.format(img))
def _load_img(self, index):
# Read Image
_img = np.array(Image.open(self.images[index]).convert('RGB')).astype(np.float32)
return _img
def _load_edge(self, index):
# Read Target object
_gt_mat = sio.loadmat(self.gts[index])
_target = np.zeros(_gt_mat['groundTruth'][0][0]['Boundaries'][0][0].shape)
for i in range(len(_gt_mat['groundTruth'][0])):
_target += _gt_mat['groundTruth'][0][i]['Boundaries'][0][0]
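        # Each annotator contributes one vote per boundary pixel; n_votes sets
        # the agreement threshold for binarization (see below).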
if self.n_votes and self.n_votes > 0:
_target = (_target >= self.n_votes).astype(np.float32)
else:
_target = (_target / max(1e-8, _target.max())).astype(np.float32)
return _target
def _download(self):
_fpath = os.path.join(Path.db_root_dir(), self.FILE)
if os.path.isfile(_fpath):
print('Files already downloaded')
return
else:
print('Downloading ' + self.URL + ' to ' + _fpath)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> %s %.1f%%' %
(_fpath, float(count * block_size) /
float(total_size) * 100.0))
sys.stdout.flush()
urllib.request.urlretrieve(self.URL, _fpath, _progress)
# extract file
cwd = os.getcwd()
print('\nExtracting tar file')
tar = tarfile.open(_fpath)
os.chdir(Path.db_root_dir())
tar.extractall()
tar.close()
os.chdir(cwd)
print('Done!')
def __str__(self):
return 'BSDS500(split=' + str(self.split) + ', n_votes=' + str(self.n_votes) + ')'
if __name__ == '__main__':
from matplotlib import pyplot as plt
dataset = BSDS500()
for i, sample in enumerate(dataset):
plt.imshow(sample['image'] / 255.)
plt.show()
plt.imshow(sample['edge'])
plt.show()
| astmt-master | fblib/dataloaders/bsds.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import sys
import errno
import cv2
import hashlib
import tarfile
import numpy as np
import torch.utils.data as data
from PIL import Image
from six.moves import urllib
from fblib.util.mypath import Path
class VOC12(data.Dataset):
URL = "http://host.robots.ox.ac.uk/pascal/VOC/voc2012/VOCtrainval_11-May-2012.tar"
FILE = "VOCtrainval_11-May-2012.tar"
MD5 = '6cd6e144f989b92b3379bac3b3de84fd'
BASE_DIR = 'VOCdevkit/VOC2012'
VOC_CATEGORY_NAMES = ['background',
'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow',
'diningtable', 'dog', 'horse', 'motorbike', 'person',
'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
def __init__(self,
root=Path.db_root_dir('PASCAL'),
download=True,
split='val',
transform=None,
area_thres=0,
retname=True,
suppress_void_pixels=False,
do_semseg=True,
overfit=False,
):
self.root = root
_voc_root = os.path.join(self.root, self.BASE_DIR)
_inst_dir = os.path.join(_voc_root, 'SegmentationObject')
_cat_dir = os.path.join(_voc_root, 'SegmentationClass')
_image_dir = os.path.join(_voc_root, 'JPEGImages')
if download:
self._download()
self.transform = transform
if isinstance(split, str):
self.split = [split]
else:
split.sort()
self.split = split
self.area_thres = area_thres
self.retname = retname
self.suppress_void_pixels = suppress_void_pixels
self.do_semseg = do_semseg
if self.do_semseg:
self.semsegs = []
# train/val/test splits are pre-cut
_splits_dir = os.path.join(_voc_root, 'ImageSets', 'Segmentation')
self.im_ids = []
self.images = []
print("Initializing dataloader for PASCAL VOC12 {} set".format(''.join(self.split)))
for splt in self.split:
            with open(os.path.join(_splits_dir, splt + '.txt'), "r") as f:
lines = f.read().splitlines()
for ii, line in enumerate(lines):
# Images
_image = os.path.join(_image_dir, line + ".jpg")
assert os.path.isfile(_image)
self.images.append(_image)
self.im_ids.append(line.rstrip('\n'))
# Semantic Segmentation
if self.do_semseg:
_semseg = os.path.join(_cat_dir, line + '.png')
assert os.path.isfile(_semseg)
self.semsegs.append(_semseg)
if self.do_semseg:
assert (len(self.images) == len(self.semsegs))
        # Overfit to a small subset of images, for debugging
if overfit:
n_of = 32
self.im_ids = self.im_ids[:n_of]
self.images = self.images[:n_of]
if self.do_semseg:
self.semsegs = self.semsegs[:n_of]
# Display stats
print('Number of dataset images: {:d}'.format(len(self.images)))
def __getitem__(self, index):
sample = {}
_img = self._load_img(index)
sample['image'] = _img
if self.do_semseg:
_semseg = self._load_semseg(index)
if _semseg is not None:
if _semseg.shape != _img.shape[:2]:
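                    # use nearest-neighbor interpolation so label ids are not mixed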
_semseg = cv2.resize(_semseg, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['semseg'] = _semseg
if self.retname:
sample['meta'] = {'image': str(self.im_ids[index]),
'im_size': (_img.shape[0], _img.shape[1])}
if self.transform is not None:
sample = self.transform(sample)
return sample
def __len__(self):
return len(self.images)
def _check_integrity(self):
_fpath = os.path.join(self.root, self.FILE)
if not os.path.isfile(_fpath):
print("{} does not exist".format(_fpath))
return False
        with open(_fpath, 'rb') as f:
            _md5c = hashlib.md5(f.read()).hexdigest()
if _md5c != self.MD5:
print(" MD5({}) did not match MD5({}) expected for {}".format(
_md5c, self.MD5, _fpath))
return False
return True
def _download(self):
_fpath = os.path.join(self.root, self.FILE)
try:
os.makedirs(self.root)
except OSError as e:
if e.errno == errno.EEXIST:
pass
else:
raise
if self._check_integrity():
print('Files already downloaded and verified')
return
else:
print('Downloading ' + self.URL + ' to ' + _fpath)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> %s %.1f%%' %
(_fpath, float(count * block_size) /
float(total_size) * 100.0))
sys.stdout.flush()
urllib.request.urlretrieve(self.URL, _fpath, _progress)
# extract file
cwd = os.getcwd()
print('\nExtracting the tar file')
tar = tarfile.open(_fpath)
os.chdir(self.root)
tar.extractall()
tar.close()
os.chdir(cwd)
print('Done!')
def _load_img(self, index):
_img = np.array(Image.open(self.images[index]).convert('RGB')).astype(np.float32)
return _img
def _load_semseg(self, index):
_semseg = np.array(Image.open(self.semsegs[index])).astype(np.float32)
if self.suppress_void_pixels:
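            # 255 marks void/boundary pixels in the PASCAL annotations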
_semseg[_semseg == 255] = 0
return _semseg
    def get_img_size(self, idx=0):
        # self.images already holds full paths to the JPEG files
        img = Image.open(self.images[idx])
        return list(reversed(img.size))
def __str__(self):
return 'VOC12(split=' + str(self.split) + ',area_thres=' + str(self.area_thres) + ')'
if __name__ == '__main__':
from matplotlib import pyplot as plt
dataset = VOC12(split='train', retname=True, do_semseg=True, suppress_void_pixels=True)
for i, sample in enumerate(dataset):
plt.imshow(sample['image']/255.)
plt.show()
plt.imshow(sample['semseg'])
plt.show()
| astmt-master | fblib/dataloaders/pascal_voc.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import sys
import tarfile
import cv2
from PIL import Image
import numpy as np
import torch.utils.data as data
from six.moves import urllib
from fblib.util.mypath import Path
class PASCALS(data.Dataset):
URL = 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL/PASCAL-S.tgz'
FILE = 'PASCAL-S.tgz'
def __init__(self,
root=Path.db_root_dir('PASCAL-S'),
download=True,
transform=None,
retname=True,
overfit=False,
threshold=None,
):
self.root = root
_image_dir = os.path.join(self.root, 'images')
_sal_dir = os.path.join(self.root, 'masks')
_split_dir = os.path.join(self.root, 'gt_sets')
if download:
self._download()
self.transform = transform
self.threshold = threshold
self.retname = retname
self.im_ids = []
self.images = []
self.sals = []
print('Initializing dataloader for PASCAL Saliency')
        with open(os.path.join(_split_dir, 'all.txt'), 'r') as f:
lines = f.read().splitlines()
for ii, line in enumerate(lines):
# Images
_image = os.path.join(_image_dir, line + '.jpg')
assert os.path.isfile(_image)
self.images.append(_image)
self.im_ids.append(line.rstrip('\n'))
# Saliency
_sal = os.path.join(_sal_dir, line + '.png')
assert os.path.isfile(_sal)
self.sals.append(_sal)
assert (len(self.images) == len(self.sals))
        # Overfit to a small subset of images, for debugging
        if overfit:
            n_of = 64
            self.im_ids = self.im_ids[:n_of]
            self.images = self.images[:n_of]
            self.sals = self.sals[:n_of]
# Display stats
print('Number of dataset images: {:d}'.format(len(self.images)))
def __getitem__(self, index):
sample = {}
# Load Image
_img = self._load_img(index)
sample['image'] = _img
# Load Saliency
_sal = self._load_sal(index)
if _sal.shape != _img.shape[:2]:
_sal = cv2.resize(_sal, _img.shape[:2][::-1], interpolation=cv2.INTER_NEAREST)
sample['sal'] = _sal
if self.retname:
sample['meta'] = {'image': str(self.im_ids[index]),
'im_size': (_img.shape[0], _img.shape[1])}
if self.transform is not None:
sample = self.transform(sample)
return sample
def __len__(self):
return len(self.images)
def _load_img(self, index):
        # Read BGR image with OpenCV and convert to RGB, float32
_img = cv2.imread(self.images[index])[:, :, ::-1].astype(np.float32)
return _img
def _load_sal(self, index):
tmp = np.array(Image.open(self.sals[index])) / 255.
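        # binarize the soft saliency mask when a threshold is given,
        # otherwise keep the continuous values in [0, 1]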
if self.threshold:
_sal = (tmp > self.threshold).astype(np.float32)
else:
_sal = tmp.astype(np.float32)
return _sal
def _download(self):
_fpath = os.path.join(Path.db_root_dir(), self.FILE)
if os.path.isfile(_fpath):
print('Files already downloaded')
return
else:
print('Downloading ' + self.URL + ' to ' + _fpath)
def _progress(count, block_size, total_size):
sys.stdout.write('\r>> %s %.1f%%' %
(_fpath, float(count * block_size) /
float(total_size) * 100.0))
sys.stdout.flush()
urllib.request.urlretrieve(self.URL, _fpath, _progress)
# extract file
cwd = os.getcwd()
print('\nExtracting tar file')
tar = tarfile.open(_fpath)
os.chdir(Path.db_root_dir())
tar.extractall()
tar.close()
os.chdir(cwd)
print('Done!')
def __str__(self):
return 'PASCAL-S()'
if __name__ == '__main__':
from matplotlib.pyplot import imshow, show
dataset = PASCALS(threshold=.5)
for i, sample in enumerate(dataset):
imshow(sample['image'] / 255.)
show()
imshow(sample['sal'])
show()
| astmt-master | fblib/dataloaders/pascal_sal.py |
| astmt-master | fblib/networks/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import math
import torch
import torch.nn as nn
from fblib.util.mypath import Path
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
model_urls = {
'mobilenet_v2_1280': 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL//models/mobilenet_v2_1280-ecbe2b568.pth'
}
def conv_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
def conv_1x1_bn(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = round(inp * expand_ratio)
self.use_res_connect = self.stride == 1 and inp == oup
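        # identity skip connection is only valid when stride is 1 and the
        # number of channels is preserved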
if expand_ratio == 1:
self.conv = nn.Sequential(
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
else:
self.conv = nn.Sequential(
# pw
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
def forward(self, x):
if self.use_res_connect:
return x + self.conv(x)
else:
return self.conv(x)
class MobileNetV2(nn.Module):
def __init__(self, n_class=1000, input_size=224, width_mult=1., last_channel=1280):
super(MobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
        inverted_residual_setting = [
# t, c, n, s
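            # t: expansion factor, c: output channels, n: repeats, s: stride of first repeat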
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
# building first layer
assert input_size % 32 == 0
input_channel = int(input_channel * width_mult)
self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel
self.features = [conv_bn(3, input_channel, 2)]
# building inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
output_channel = int(c * width_mult)
for i in range(n):
if i == 0:
self.features.append(block(input_channel, output_channel, s, expand_ratio=t))
else:
self.features.append(block(input_channel, output_channel, 1, expand_ratio=t))
input_channel = output_channel
# building last several layers
self.features.append(conv_1x1_bn(input_channel, self.last_channel))
# make it nn.Sequential
self.features = nn.Sequential(*self.features)
# building classifier
self.classifier = nn.Sequential(
nn.Dropout(0.2),
nn.Linear(self.last_channel, n_class),
)
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = x.mean(3).mean(2)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
class MobileNetV2Features(MobileNetV2):
def __init__(self, n_class=1000, input_size=224, width_mult=1., last_channel=1280):
super(MobileNetV2Features, self).__init__(n_class=n_class,
input_size=input_size,
width_mult=width_mult,
last_channel=last_channel)
def forward(self, x):
x = self.features(x)
features = x.mean(3).mean(2)
x = self.classifier(features)
return x, features
def mobilenet_v2(pretrained=False, features=False, n_class=1000, last_channel=1280, remote=True):
if not features:
model = MobileNetV2(n_class=n_class, last_channel=last_channel)
else:
model = MobileNetV2Features(n_class=n_class, last_channel=last_channel)
if pretrained:
if remote:
checkpoint = load_state_dict_from_url(model_urls['mobilenet_v2_1280'], map_location='cpu', progress=True)
else:
checkpoint = torch.load(
os.path.join(Path.models_dir(), 'mobilenet_v2.pth'), map_location='cpu')
model.load_state_dict(checkpoint)
return model
def test_visualize_graph():
import fblib.util.visualize as viz
net = mobilenet_v2()
net.eval()
x = torch.randn(2, 3, 224, 224)
x.requires_grad_()
y = net(x)
# pdf visualizer
g = viz.make_dot(y, net.state_dict())
g.view(directory='./')
def test_reproduce():
import os
import cv2
import numpy as np
import pickle
import urllib.request
import torch.nn.functional as F
from fblib import PROJECT_ROOT_DIR
classes = pickle.load(urllib.request.urlopen(
'https://gist.githubusercontent.com/yrevar/6135f1bd8dcf2e0cc683/raw/'
'd133d61a09d7e5a3b36b8c111a8dd5c4b5d560ee/imagenet1000_clsid_to_human.pkl'))
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
img = cv2.imread(
os.path.join(PROJECT_ROOT_DIR, 'util/img/cat.jpg')) \
.astype(np.float32) / 255.
img = cv2.resize(img, dsize=(224, 224))
img = (img - mean) / std
img = img[:, :, :, np.newaxis]
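    # move the batch axis to the front: (H, W, C, N) -> (N, C, H, W)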
img = img.transpose((3, 2, 0, 1))
img = torch.from_numpy(img.astype(np.float32))
model = mobilenet_v2(pretrained=True, features=False)
model = model.eval()
with torch.no_grad():
output = model(img)
output = torch.nn.functional.softmax(output, dim=1)
    print('Class id: {}, class name: {}, probability: {:.2f}'.format(
        output.argmax().item(), classes[output.argmax().item()], output.max().item()))
if __name__ == '__main__':
test_reproduce()
| astmt-master | fblib/networks/classification/mobilenet_v2.py |
| astmt-master | fblib/networks/classification/__init__.py |
# Copyright (c) Facebook, Inc. and its affiliates.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
import os
import math
from collections import OrderedDict
import torch
import torch.nn as nn
from fblib.util.mypath import Path
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
model_urls = {
'se_mobilenet_v2_1280': 'https://data.vision.ee.ethz.ch/kmaninis/share/MTL//models/'
'se_mobilenet_v2_1280-ce5a6e1d9.pth'
}
def conv_bn(inp, oup, stride):
return nn.Sequential(
nn.Conv2d(inp, oup, 3, stride, 1, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
def conv_1x1_bn(inp, oup):
return nn.Sequential(
nn.Conv2d(inp, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
nn.ReLU6(inplace=True)
)
class SEMobile(nn.Module):
def __init__(self, channel, reduction=4):
super(SEMobile, self).__init__()
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(
nn.Linear(channel, channel // reduction),
nn.ReLU6(inplace=True),
nn.Linear(channel // reduction, channel),
nn.Sigmoid()
)
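        # Squeeze-and-Excitation: squeeze each channel to a scalar via global
        # average pooling, pass it through a bottleneck MLP, and use the
        # resulting sigmoid gate to rescale the channels (see forward below)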
def forward(self, x):
b, c, _, _ = x.size()
y = self.avg_pool(x).view(b, c)
y = self.fc(y).view(b, c, 1, 1)
return x * y
class InvertedResidual(nn.Module):
def __init__(self, inp, oup, stride, expand_ratio):
super(InvertedResidual, self).__init__()
self.stride = stride
assert stride in [1, 2]
hidden_dim = round(inp * expand_ratio)
self.use_res_connect = self.stride == 1 and inp == oup
if expand_ratio == 1:
self.conv = nn.Sequential(
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
else:
self.conv = nn.Sequential(
# pw
nn.Conv2d(inp, hidden_dim, 1, 1, 0, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# dw
nn.Conv2d(hidden_dim, hidden_dim, 3, stride, 1, groups=hidden_dim, bias=False),
nn.BatchNorm2d(hidden_dim),
nn.ReLU6(inplace=True),
# pw-linear
nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
nn.BatchNorm2d(oup),
)
self.se = SEMobile(channel=oup, reduction=4)
def forward(self, x):
if self.use_res_connect:
out = self.conv(x)
out = self.se(out)
return x + out
else:
out = self.conv(x)
out = self.se(out)
return out
class SEMobileNetV2(nn.Module):
def __init__(self, n_class=1000, input_size=224, width_mult=1., last_channel=1280):
super(SEMobileNetV2, self).__init__()
block = InvertedResidual
input_channel = 32
        inverted_residual_setting = [
# t, c, n, s
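            # t: expansion factor, c: output channels, n: repeats, s: stride of first repeat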
[1, 16, 1, 1],
[6, 24, 2, 2],
[6, 32, 3, 2],
[6, 64, 4, 2],
[6, 96, 3, 1],
[6, 160, 3, 2],
[6, 320, 1, 1],
]
assert input_size % 32 == 0
input_channel = int(input_channel * width_mult)
self.last_channel = int(last_channel * width_mult) if width_mult > 1.0 else last_channel
# build first layer
self.features = [conv_bn(3, input_channel, 2)]
# build inverted residual blocks
        for t, c, n, s in inverted_residual_setting:
output_channel = int(c * width_mult)
for i in range(n):
if i == 0:
self.features.append(block(input_channel, output_channel, s, expand_ratio=t))
else:
self.features.append(block(input_channel, output_channel, 1, expand_ratio=t))
input_channel = output_channel
# build last layers
self.features.append(conv_1x1_bn(input_channel, self.last_channel))
# make it nn.Sequential
self.features = nn.Sequential(*self.features)
# build classifier
self.classifier = nn.Sequential(
nn.Dropout(0.2),
nn.Linear(self.last_channel, n_class),
)
self._initialize_weights()
def forward(self, x):
x = self.features(x)
x = x.mean(3).mean(2)
x = self.classifier(x)
return x
def _initialize_weights(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
if m.bias is not None:
m.bias.data.zero_()
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
n = m.weight.size(1)
m.weight.data.normal_(0, 0.01)
m.bias.data.zero_()
class SEMobileNetV2Features(SEMobileNetV2):
def __init__(self, n_class=1000, input_size=224, width_mult=1., last_channel=1280):
super(SEMobileNetV2Features, self).__init__(n_class=n_class,
input_size=input_size,
width_mult=width_mult,
last_channel=last_channel)
def forward(self, x):
x = self.features(x)
features = x.mean(3).mean(2)
x = self.classifier(features)
return x, features
def se_mobilenet_v2(pretrained=False, features=False, n_class=1000, last_channel=1280, remote=True):
if not features:
model = SEMobileNetV2(n_class=n_class, last_channel=last_channel)
else:
model = SEMobileNetV2Features(n_class=n_class, last_channel=last_channel)
if pretrained:
print('Loading Imagenet pre-trained SE-MobileNet-v2')
# Load checkpoint
if remote:
checkpoint = load_state_dict_from_url(model_urls['se_mobilenet_v2_1280'], map_location='cpu', progress=True)
else:
checkpoint = torch.load(
os.path.join(Path.models_dir(), 'se_mobilenet_v2_1280.pth'), map_location=lambda storage, loc: storage)
checkpoint = checkpoint['model_state']
# Handle DataParallel
if 'module.' in list(checkpoint.keys())[0]:
new_state_dict = OrderedDict()
for k, v in checkpoint.items():
name = k.replace('module.', '') # remove `module.`
new_state_dict[name] = v
else:
new_state_dict = checkpoint
# Load pre-trained IN model
model.load_state_dict(new_state_dict)
return model
def test_reproduce():
import os
import torch
import pickle
import cv2
import numpy as np
import urllib.request
from fblib import PROJECT_ROOT_DIR
classes = pickle.load(urllib.request.urlopen(
'https://gist.githubusercontent.com/yrevar/6135f1bd8dcf2e0cc683/raw/d133d61a09d7e5a3b36b8c111a8dd5c4b5d560ee'
'/imagenet1000_clsid_to_human.pkl'))
model = se_mobilenet_v2(pretrained=True)
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
img = cv2.imread(os.path.join(PROJECT_ROOT_DIR, 'util/img/cat.jpg')).astype(np.float32) / 255.
img = cv2.resize(img, dsize=(224, 224))
img = (img - mean) / std
img = img[:, :, :, np.newaxis]
img = img.transpose((3, 2, 0, 1))
img = torch.from_numpy(img.astype(np.float32))
model = model.eval()
with torch.no_grad():
output = model(img)
output = torch.nn.functional.softmax(output, dim=1)
    print('Class id: {}, class name: {}, probability: {:.2f}'.format(
        output.argmax().item(), classes[output.argmax().item()], output.max().item()))
if __name__ == '__main__':
test_reproduce()
| astmt-master | fblib/networks/classification/se_mobilenet_v2.py |