from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Softmax(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Softmax',
inputs=['x'],
outputs=['y'],
)
x = np.array([[-1, 0, 1]]).astype(np.float32)
# expected output [[0.09003058, 0.24472848, 0.66524094]]
y = np.exp(x) / np.sum(np.exp(x), axis=1, keepdims=True)
expect(node, inputs=[x], outputs=[y],
name='test_softmax_example')
@staticmethod
def export_softmax_axis(): # type: () -> None
def softmax_2d(x): # type: (np.ndarray) -> np.ndarray
max_x = np.max(x, axis=1).reshape((-1, 1))
exp_x = np.exp(x - max_x)
return exp_x / np.sum(exp_x, axis=1).reshape((-1, 1))
x = np.array([[0, 1, 2, 3], [10000, 10001, 10002, 10003]]).astype(np.float32)
# expected output [[0.0320586, 0.08714432, 0.23688284, 0.64391428],
# [0.0320586, 0.08714432, 0.23688284, 0.64391428]]
y = softmax_2d(x)
node = onnx.helper.make_node(
'Softmax',
inputs=['x'],
outputs=['y'],
)
expect(node, inputs=[x], outputs=[y],
name='test_softmax_large_number')
x = np.abs(np.random.randn(3, 4, 5).astype(np.float32))
node = onnx.helper.make_node(
'Softmax',
inputs=['x'],
outputs=['y'],
axis=0,
)
y = softmax_2d(x.reshape(1, 60)).reshape(3, 4, 5)
expect(node, inputs=[x], outputs=[y],
name='test_softmax_axis_0')
node = onnx.helper.make_node(
'Softmax',
inputs=['x'],
outputs=['y'],
axis=1,
)
y = softmax_2d(x.reshape(3, 20)).reshape(3, 4, 5)
expect(node, inputs=[x], outputs=[y],
name='test_softmax_axis_1')
# default axis is 1
node = onnx.helper.make_node(
'Softmax',
inputs=['x'],
outputs=['y'],
)
expect(node, inputs=[x], outputs=[y],
name='test_softmax_default_axis')
node = onnx.helper.make_node(
'Softmax',
inputs=['x'],
outputs=['y'],
axis=2,
)
y = softmax_2d(x.reshape(12, 5)).reshape(3, 4, 5)
expect(node, inputs=[x], outputs=[y],
name='test_softmax_axis_2')
node = onnx.helper.make_node(
'Softmax',
inputs=['x'],
outputs=['y'],
axis=-1,
)
y = softmax_2d(x.reshape(12, 5)).reshape(3, 4, 5)
expect(node, inputs=[x], outputs=[y],
name='test_softmax_negative_axis')
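# Illustrative sketch (added; not part of the original test file): the max
# subtraction in softmax_2d is what keeps np.exp from overflowing. float32
# exp saturates to inf for inputs above ~88, so the naive formula produces
# NaN on the large-number rows while the shifted form stays finite.
if __name__ == '__main__':
    def stable_softmax_2d(v):  # same max-shift trick as softmax_2d above
        e = np.exp(v - np.max(v, axis=1, keepdims=True))
        return e / np.sum(e, axis=1, keepdims=True)
    big = np.array([[10000., 10001., 10002., 10003.]], dtype=np.float32)
    print(np.exp(big) / np.sum(np.exp(big), axis=1, keepdims=True))  # -> [[nan nan nan nan]] plus overflow warnings
    print(stable_softmax_2d(big))  # -> [[0.0320586 0.08714432 0.23688284 0.6439143]]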
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Atan(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Atan',
inputs=['x'],
outputs=['y'],
)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.arctan(x)
expect(node, inputs=[x], outputs=[y],
name='test_atan_example')
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.arctan(x)
expect(node, inputs=[x], outputs=[y],
name='test_atan')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Atanh(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Atanh',
inputs=['x'],
outputs=['y'],
)
x = np.array([-0.5, 0, 0.5]).astype(np.float32)
y = np.arctanh(x) # expected output [-0.54930615, 0., 0.54930615]
expect(node, inputs=[x], outputs=[y],
name='test_atanh_example')
x = np.random.uniform(0.0, 1.0, (3, 4, 5)).astype(np.float32)
y = np.arctanh(x)
expect(node, inputs=[x], outputs=[y],
name='test_atanh')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Acos(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Acos',
inputs=['x'],
outputs=['y'],
)
x = np.array([-0.5, 0, 0.5]).astype(np.float32)
y = np.arccos(x)
expect(node, inputs=[x], outputs=[y],
name='test_acos_example')
x = np.random.rand(3, 4, 5).astype(np.float32)
y = np.arccos(x)
expect(node, inputs=[x], outputs=[y],
name='test_acos')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Expand(Base):
@staticmethod
def export_dim_changed(): # type: () -> None
node = onnx.helper.make_node(
'Expand',
inputs=['data', 'new_shape'],
outputs=['expanded'],
)
shape = [3, 1]
data = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
#print(data)
#[[1.], [2.], [3.]]
new_shape = [2, 1, 6]
expanded = data * np.ones(new_shape, dtype=np.float32)
#print(expanded)
#[[[1., 1., 1., 1., 1., 1.],
# [2., 2., 2., 2., 2., 2.],
# [3., 3., 3., 3., 3., 3.]],
#
# [[1., 1., 1., 1., 1., 1.],
# [2., 2., 2., 2., 2., 2.],
# [3., 3., 3., 3., 3., 3.]]]
new_shape = np.array(new_shape, dtype=np.int64)
expect(node, inputs=[data, new_shape], outputs=[expanded],
name='test_expand_dim_changed')
@staticmethod
def export_dim_unchanged(): # type: () -> None
node = onnx.helper.make_node(
'Expand',
inputs=['data', 'new_shape'],
outputs=['expanded'],
)
shape = [3, 1]
new_shape = [3, 4]
data = np.reshape(np.arange(1, np.prod(shape) + 1, dtype=np.float32), shape)
#print(data)
#[[1.], [2.], [3.]]
expanded = np.tile(data, 4)
#print(expanded)
#[[1., 1., 1., 1.],
# [2., 2., 2., 2.],
# [3., 3., 3., 3.]]
new_shape = np.array(new_shape, dtype=np.int64)
expect(node, inputs=[data, new_shape], outputs=[expanded],
name='test_expand_dim_unchanged')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class QLinearConv(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node('QLinearConv',
inputs=['x', 'x_scale', 'x_zero_point', 'w', 'w_scale', 'w_zero_point', 'y_scale', 'y_zero_point'],
outputs=['y'],)
x = np.array([[255, 174, 162, 25, 203, 168, 58],
[15, 59, 237, 95, 129, 0, 64],
[56, 242, 153, 221, 168, 12, 166],
[232, 178, 186, 195, 237, 162, 237],
[188, 39, 124, 77, 80, 102, 43],
[127, 230, 21, 83, 41, 40, 134],
[255, 154, 92, 141, 42, 148, 247], ], dtype=np.uint8).reshape((1, 1, 7, 7))
x_scale = np.float32(0.00369204697)
x_zero_point = np.uint8(132)
w = np.array([0], dtype=np.uint8).reshape((1, 1, 1, 1))
w_scale = np.array([0.00172794575], dtype=np.float32)
w_zero_point = np.array([255], dtype=np.uint8)
y_scale = np.float32(0.00162681262)
y_zero_point = np.uint8(123)
output = np.array([[0, 81, 93, 230, 52, 87, 197],
[240, 196, 18, 160, 126, 255, 191],
[199, 13, 102, 34, 87, 243, 89],
[23, 77, 69, 60, 18, 93, 18],
[67, 216, 131, 178, 175, 153, 212],
[128, 25, 234, 172, 214, 215, 121],
[0, 101, 163, 114, 213, 107, 8], ], dtype=np.uint8).reshape((1, 1, 7, 7))
expect(node, inputs=[x, x_scale, x_zero_point, w, w_scale, w_zero_point, y_scale, y_zero_point], outputs=[output],
name='test_qlinearconv')
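# Illustrative sketch (added; not part of the original test): conceptually,
# QLinearConv dequantizes its inputs, convolves in float, and requantizes:
#   y_q = saturate(round(conv((x_q - x_zp) * x_scale, (w_q - w_zp) * w_scale) / y_scale) + y_zp)
# With the 1x1 single-channel kernel above, the convolution reduces to an
# elementwise product, so the expected output can be reproduced directly:
if __name__ == '__main__':
    x_q = np.array([255, 174], dtype=np.uint8)  # first two input pixels
    x_deq = (x_q.astype(np.float32) - 132) * np.float32(0.00369204697)  # dequantize x
    w_deq = (np.float32(0) - 255) * np.float32(0.00172794575)           # dequantize w
    y = x_deq * w_deq                                                   # 1x1 conv
    y_q = np.clip(np.round(y / np.float32(0.00162681262)) + 123, 0, 255).astype(np.uint8)
    print(y_q)  # -> [ 0 81], matching the first two expected output pixels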
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Sin(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Sin',
inputs=['x'],
outputs=['y'],
)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.sin(x)
expect(node, inputs=[x], outputs=[y],
name='test_sin_example')
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.sin(x)
expect(node, inputs=[x], outputs=[y],
name='test_sin')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
# The below GatherElements' numpy implementation is from https://stackoverflow.com/a/46204790/11767360
def gather_elements(data, indices, axis=0): # type: ignore
data_swapped = np.swapaxes(data, 0, axis)
index_swapped = np.swapaxes(indices, 0, axis)
gathered = np.choose(index_swapped, data_swapped, mode='wrap')
y = np.swapaxes(gathered, 0, axis)
return y
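# Note (added): np.choose accepts at most 32 choice arrays, so this reference
# implementation requires data.shape[axis] <= 32. mode='wrap' takes indices
# modulo the axis length, which is also what makes the negative-indices
# example below come out right.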
class GatherElements(Base):
@staticmethod
def export_gather_elements_0(): # type: () -> None
axis = 1
node = onnx.helper.make_node(
'GatherElements',
inputs=['data', 'indices'],
outputs=['y'],
axis=axis,
)
data = np.array([[1, 2],
[3, 4]], dtype=np.float32)
indices = np.array([[0, 0],
[1, 0]], dtype=np.int32)
y = gather_elements(data, indices, axis)
# print(y) produces
# [[1, 1],
# [4, 3]]
expect(node, inputs=[data, indices.astype(np.int64)], outputs=[y],
name='test_gather_elements_0')
@staticmethod
def export_gather_elements_1(): # type: () -> None
axis = 0
node = onnx.helper.make_node(
'GatherElements',
inputs=['data', 'indices'],
outputs=['y'],
axis=axis,
)
data = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=np.float32)
indices = np.array([[1, 2, 0],
[2, 0, 0]], dtype=np.int32)
y = gather_elements(data, indices, axis)
# print(y) produces
# [[4, 8, 3],
# [7, 2, 3]]
expect(node, inputs=[data, indices.astype(np.int64)], outputs=[y],
name='test_gather_elements_1')
@staticmethod
def export_gather_elements_negative_indices(): # type: () -> None
axis = 0
node = onnx.helper.make_node(
'GatherElements',
inputs=['data', 'indices'],
outputs=['y'],
axis=axis,
)
data = np.array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]], dtype=np.float32)
indices = np.array([[-1, -2, 0],
[-2, 0, 0]], dtype=np.int32)
y = gather_elements(data, indices, axis)
# print(y) produces
# [[7, 5, 3],
# [4, 2, 3]]
expect(node, inputs=[data, indices.astype(np.int64)], outputs=[y],
name='test_gather_elements_negative_indices')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Selu(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Selu',
inputs=['x'],
outputs=['y'],
alpha=2.0,
gamma=3.0
)
x = np.array([-1, 0, 1]).astype(np.float32)
# expected output [-3.79272318, 0., 3.]
y = np.clip(x, 0, np.inf) * 3.0 + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0 * 3.0
expect(node, inputs=[x], outputs=[y],
name='test_selu_example')
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x, 0, np.inf) * 3.0 + (np.exp(np.clip(x, -np.inf, 0)) - 1) * 2.0 * 3.0
expect(node, inputs=[x], outputs=[y],
name='test_selu')
@staticmethod
def export_selu_default(): # type: () -> None
default_alpha = 1.67326319217681884765625
default_gamma = 1.05070102214813232421875
node = onnx.helper.make_node(
'Selu',
inputs=['x'],
outputs=['y'],
)
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x, 0, np.inf) * default_gamma + \
(np.exp(np.clip(x, -np.inf, 0)) - 1) * default_alpha * default_gamma
expect(node, inputs=[x], outputs=[y],
name='test_selu_default')
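# Note (added): the clip-based expressions above are a branch-free form of the
# piecewise definition selu(x) = gamma * (x if x > 0 else alpha * (exp(x) - 1)).
# The defaults in export_selu_default are the float32-rounded constants from
# the SELU paper ("Self-Normalizing Neural Networks"): alpha ~1.6733, gamma ~1.0507.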
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Reciprocal(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Reciprocal',
inputs=['x'],
outputs=['y'],
)
x = np.array([-4, 2]).astype(np.float32)
y = np.reciprocal(x) # expected output [-0.25, 0.5],
expect(node, inputs=[x], outputs=[y],
name='test_reciprocal_example')
x = np.random.rand(3, 4, 5).astype(np.float32) + 0.5
y = np.reciprocal(x)
expect(node, inputs=[x], outputs=[y],
name='test_reciprocal')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
def argmin_use_numpy(data, axis=0, keepdims=1): # type: (np.ndarray, int, int) -> np.ndarray
result = np.argmin(data, axis=axis)
if keepdims == 1:
result = np.expand_dims(result, axis)
return result.astype(np.int64)
def argmin_use_numpy_select_last_index(data, axis=0, keepdims=True): # type: (np.ndarray, int, bool) -> np.ndarray
data = np.flip(data, axis)
result = np.argmin(data, axis=axis)
result = data.shape[axis] - result - 1
if keepdims:
result = np.expand_dims(result, axis)
return result.astype(np.int64)
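# Note (added): np.argmin returns the first occurrence of the minimum, so the
# select_last_index helper flips the data along `axis`, takes the argmin of the
# reversed view (the last occurrence in the original order), and maps the index
# back with shape[axis] - result - 1.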
class ArgMin(Base):
@staticmethod
def export_no_keepdims(): # type: () -> None
data = np.array([[2, 1], [3, 10]], dtype=np.float32)
axis = 1
keepdims = 0
node = onnx.helper.make_node(
'ArgMin',
inputs=['data'],
outputs=['result'],
axis=axis,
keepdims=keepdims)
# The content of result is : [[1, 0]]
result = argmin_use_numpy(data, axis=axis, keepdims=keepdims)
expect(node, inputs=[data], outputs=[result], name='test_argmin_no_keepdims_example')
data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
# result's shape: [2, 4]
result = argmin_use_numpy(data, axis=axis, keepdims=keepdims)
expect(node, inputs=[data], outputs=[result], name='test_argmin_no_keepdims_random')
@staticmethod
def export_keepdims(): # type: () -> None
data = np.array([[2, 1], [3, 10]], dtype=np.float32)
axis = 1
keepdims = 1
node = onnx.helper.make_node(
'ArgMin',
inputs=['data'],
outputs=['result'],
axis=axis,
keepdims=keepdims)
# The content of result is : [[1], [0]]
result = argmin_use_numpy(data, axis=axis, keepdims=keepdims)
expect(node, inputs=[data], outputs=[result], name='test_argmin_keepdims_example')
data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
# result's shape: [2, 1, 4]
result = argmin_use_numpy(data, axis=axis, keepdims=keepdims)
expect(node, inputs=[data], outputs=[result], name='test_argmin_keepdims_random')
@staticmethod
def export_default_axes_keepdims(): # type: () -> None
data = np.array([[2, 1], [3, 10]], dtype=np.float32)
keepdims = 1
node = onnx.helper.make_node(
'ArgMin',
inputs=['data'],
outputs=['result'],
keepdims=keepdims)
# The content of result is : [[0], [0]]
result = argmin_use_numpy(data, keepdims=keepdims)
expect(node, inputs=[data], outputs=[result], name='test_argmin_default_axis_example')
data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
# result's shape: [1, 3, 4]
result = argmin_use_numpy(data, keepdims=keepdims)
expect(node, inputs=[data], outputs=[result], name='test_argmin_default_axis_random')
@staticmethod
def export_negative_axis_keepdims(): # type: () -> None
data = np.array([[2, 1], [3, 10]], dtype=np.float32)
axis = -1
keepdims = 1
node = onnx.helper.make_node(
'ArgMin',
inputs=['data'],
outputs=['result'],
axis=axis,
keepdims=keepdims)
# The content of result is : [[1], [0]]
result = argmin_use_numpy(data, axis=axis, keepdims=keepdims)
expect(node, inputs=[data], outputs=[result], name='test_argmin_negative_axis_keepdims_example')
data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
# result's shape: [2, 3, 1]
result = argmin_use_numpy(data, axis=axis, keepdims=keepdims)
expect(node, inputs=[data], outputs=[result], name='test_argmin_negative_axis_keepdims_random')
@staticmethod
def export_no_keepdims_select_last_index(): # type: () -> None
data = np.array([[2, 2], [3, 10]], dtype=np.float32)
axis = 1
keepdims = 0
node = onnx.helper.make_node(
'ArgMin',
inputs=['data'],
outputs=['result'],
axis=axis,
keepdims=keepdims,
select_last_index=True)
# result: [[1, 0]]
result = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)
expect(node, inputs=[data], outputs=[result], name='test_argmin_no_keepdims_example_select_last_index')
data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
# result's shape: [2, 4]
result = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)
expect(node, inputs=[data], outputs=[result], name='test_argmin_no_keepdims_random_select_last_index')
@staticmethod
def export_keepdims_select_last_index(): # type: () -> None
data = np.array([[2, 2], [3, 10]], dtype=np.float32)
axis = 1
keepdims = 1
node = onnx.helper.make_node(
'ArgMin',
inputs=['data'],
outputs=['result'],
axis=axis,
keepdims=keepdims,
select_last_index=True)
# result: [[1], [0]]
result = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)
expect(node, inputs=[data], outputs=[result], name='test_argmin_keepdims_example_select_last_index')
data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
# result's shape: [2, 1, 4]
result = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)
expect(node, inputs=[data], outputs=[result], name='test_argmin_keepdims_random_select_last_index')
@staticmethod
def export_default_axes_keepdims_select_last_index(): # type: () -> None
data = np.array([[2, 2], [3, 10]], dtype=np.float32)
keepdims = 1
node = onnx.helper.make_node(
'ArgMin',
inputs=['data'],
outputs=['result'],
keepdims=keepdims,
select_last_index=True)
# result: [[0, 0]]
result = argmin_use_numpy_select_last_index(data, keepdims=keepdims)
expect(node, inputs=[data], outputs=[result], name='test_argmin_default_axis_example_select_last_index')
data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
# result's shape: [1, 3, 4]
result = argmin_use_numpy_select_last_index(data, keepdims=keepdims)
expect(node, inputs=[data], outputs=[result], name='test_argmin_default_axis_random_select_last_index')
@staticmethod
def export_negative_axis_keepdims_select_last_index(): # type: () -> None
data = np.array([[2, 2], [3, 10]], dtype=np.float32)
axis = -1
keepdims = 1
node = onnx.helper.make_node(
'ArgMin',
inputs=['data'],
outputs=['result'],
axis=axis,
keepdims=keepdims,
select_last_index=True)
# result: [[1], [0]]
result = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)
expect(node, inputs=[data], outputs=[result], name='test_argmin_negative_axis_keepdims_example_select_last_index')
data = np.random.uniform(-10, 10, [2, 3, 4]).astype(np.float32)
# result's shape: [2, 3, 1]
result = argmin_use_numpy_select_last_index(data, axis=axis, keepdims=keepdims)
expect(node, inputs=[data], outputs=[result], name='test_argmin_negative_axis_keepdims_random_select_last_index')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class DepthToSpace(Base):
@staticmethod
def export_default_mode_example(): # type: () -> None
node = onnx.helper.make_node(
'DepthToSpace',
inputs=['x'],
outputs=['y'],
blocksize=2,
mode='DCR'
)
# (1, 8, 2, 3) input tensor
x = np.array([[[[0., 1., 2.],
[3., 4., 5.]],
[[9., 10., 11.],
[12., 13., 14.]],
[[18., 19., 20.],
[21., 22., 23.]],
[[27., 28., 29.],
[30., 31., 32.]],
[[36., 37., 38.],
[39., 40., 41.]],
[[45., 46., 47.],
[48., 49., 50.]],
[[54., 55., 56.],
[57., 58., 59.]],
[[63., 64., 65.],
[66., 67., 68.]]]]).astype(np.float32)
# (1, 2, 4, 6) output tensor
y = np.array([[[[0., 18., 1., 19., 2., 20.],
[36., 54., 37., 55., 38., 56.],
[3., 21., 4., 22., 5., 23.],
[39., 57., 40., 58., 41., 59.]],
[[9., 27., 10., 28., 11., 29.],
[45., 63., 46., 64., 47., 65.],
[12., 30., 13., 31., 14., 32.],
[48., 66., 49., 67., 50., 68.]]]]).astype(np.float32)
expect(node, inputs=[x], outputs=[y],
name='test_depthtospace_example')
@staticmethod
def export_crd_mode_example(): # type: () -> None
node = onnx.helper.make_node(
'DepthToSpace',
inputs=['x'],
outputs=['y'],
blocksize=2,
mode='CRD'
)
# (1, 8, 2, 3) input tensor
x = np.array([[[[0., 1., 2.],
[3., 4., 5.]],
[[9., 10., 11.],
[12., 13., 14.]],
[[18., 19., 20.],
[21., 22., 23.]],
[[27., 28., 29.],
[30., 31., 32.]],
[[36., 37., 38.],
[39., 40., 41.]],
[[45., 46., 47.],
[48., 49., 50.]],
[[54., 55., 56.],
[57., 58., 59.]],
[[63., 64., 65.],
[66., 67., 68.]]]]).astype(np.float32)
# (1, 2, 4, 6) output tensor
y = np.array([[[[0., 9., 1., 10., 2., 11.],
[18., 27., 19., 28., 20., 29.],
[3., 12., 4., 13., 5., 14.],
[21., 30., 22., 31., 23., 32.]],
[[36., 45., 37., 46., 38., 47.],
[54., 63., 55., 64., 56., 65.],
[39., 48., 40., 49., 41., 50.],
[57., 66., 58., 67., 59., 68.]]]]).astype(np.float32)
expect(node, inputs=[x], outputs=[y],
name='test_depthtospace_crd_mode_example')
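# Illustrative reference (added; mirrors the reshape/transpose formulas in the
# ONNX DepthToSpace spec for NCHW input). DCR reads the depth dimension in
# depth-column-row order, CRD in column-row-depth order:
def _depth_to_space(x, blocksize, mode='DCR'):  # type: (np.ndarray, int, str) -> np.ndarray
    b, c, h, w = x.shape
    if mode == 'DCR':
        tmp = x.reshape(b, blocksize, blocksize, c // (blocksize ** 2), h, w)
        tmp = tmp.transpose(0, 3, 4, 1, 5, 2)
    else:  # 'CRD'
        tmp = x.reshape(b, c // (blocksize ** 2), blocksize, blocksize, h, w)
        tmp = tmp.transpose(0, 1, 4, 2, 5, 3)
    return tmp.reshape(b, c // (blocksize ** 2), h * blocksize, w * blocksize)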
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class And(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'And',
inputs=['x', 'y'],
outputs=['and'],
)
# 2d
x = (np.random.randn(3, 4) > 0).astype(np.bool_)
y = (np.random.randn(3, 4) > 0).astype(np.bool_)
z = np.logical_and(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_and2d')
# 3d
x = (np.random.randn(3, 4, 5) > 0).astype(np.bool_)
y = (np.random.randn(3, 4, 5) > 0).astype(np.bool_)
z = np.logical_and(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_and3d')
# 4d
x = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool_)
y = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool_)
z = np.logical_and(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_and4d')
@staticmethod
def export_and_broadcast(): # type: () -> None
node = onnx.helper.make_node(
'And',
inputs=['x', 'y'],
outputs=['and'],
)
# 3d vs 1d
x = (np.random.randn(3, 4, 5) > 0).astype(np.bool_)
y = (np.random.randn(5) > 0).astype(np.bool_)
z = np.logical_and(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_and_bcast3v1d')
# 3d vs 2d
x = (np.random.randn(3, 4, 5) > 0).astype(np.bool_)
y = (np.random.randn(4, 5) > 0).astype(np.bool_)
z = np.logical_and(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_and_bcast3v2d')
# 4d vs 2d
x = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool_)
y = (np.random.randn(5, 6) > 0).astype(np.bool_)
z = np.logical_and(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_and_bcast4v2d')
# 4d vs 3d
x = (np.random.randn(3, 4, 5, 6) > 0).astype(np.bool_)
y = (np.random.randn(4, 5, 6) > 0).astype(np.bool_)
z = np.logical_and(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_and_bcast4v3d')
# 4d vs 4d
x = (np.random.randn(1, 4, 1, 6) > 0).astype(np.bool_)
y = (np.random.randn(3, 1, 5, 6) > 0).astype(np.bool_)
z = np.logical_and(x, y)
expect(node, inputs=[x, y], outputs=[z],
name='test_and_bcast4v4d')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import pytest # type: ignore
from .coverage import Coverage
from typing import Dict, Text, Sequence, Any, List
_coverage = Coverage()
_marks = {} # type: Dict[Text, Sequence[Any]]
def _add_mark(mark, bucket): # type: (Any, Text) -> None
proto = mark.args[0]
if isinstance(proto, list):
assert len(proto) == 1
proto = proto[0]
if proto is not None:
_coverage.add_proto(proto, bucket, mark.args[1] == 'RealModel')
def pytest_runtest_call(item): # type: (pytest.nodes.Item) -> None
mark = item.get_closest_marker('onnx_coverage')
if mark:
assert item.nodeid not in _marks
_marks[item.nodeid] = mark
def pytest_runtest_logreport(report): # type: (Any) -> None
if (report.when == 'call'
and report.outcome == 'passed'
and report.nodeid in _marks):
mark = _marks[report.nodeid]
_add_mark(mark, 'passed')
@pytest.hookimpl(trylast=True) # type: ignore
def pytest_terminal_summary(terminalreporter, exitstatus): # type: (pytest.terminal.TerminalReporter, int) -> None
for mark in _marks.values():
_add_mark(mark, 'loaded')
_coverage.report_text(terminalreporter)
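# Flow note (added): pytest_runtest_call records each test's onnx_coverage
# marker, pytest_runtest_logreport promotes the markers of passing tests into
# the 'passed' bucket, and pytest_terminal_summary counts every recorded marker
# as 'loaded' before printing the coverage table.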
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict, OrderedDict
import os
import csv
import datetime
from tabulate import tabulate # type: ignore
import onnx
from onnx import defs, helper, GraphProto
from typing import Optional, Text, Set, Dict, IO, List, Any
_all_schemas = defs.get_all_schemas()
class AttrCoverage(object):
def __init__(self): # type: () -> None
self.name = None # type: Optional[Text]
self.values = set() # type: Set[Text]
def add(self, attr): # type: (onnx.AttributeProto) -> None
assert self.name in [None, attr.name]
self.name = attr.name
value = helper.get_attribute_value(attr)
# Turn list into tuple so we can put it into set
# As value can be string, don't blindly turn `collections.Iterable`
# into tuple.
if isinstance(value, list):
value = tuple(value)
self.values.add(str(value))
class NodeCoverage(object):
def __init__(self): # type: () -> None
self.op_type = None # type: Optional[Text]
self.attr_coverages = defaultdict(AttrCoverage) # type: Dict[Text, AttrCoverage]
def add(self, node): # type: (onnx.NodeProto) -> None
assert self.op_type in [None, node.op_type]
if self.op_type is None:
self.op_type = node.op_type
assert self.op_type is not None
self.schema = defs.get_schema(self.op_type)
for attr in node.attribute:
self.attr_coverages[attr.name].add(attr)
class ModelCoverage(object):
def __init__(self): # type: () -> None
self.name = None # type: Optional[Text]
self.graph = None # type: Optional[GraphProto]
self.node_coverages = defaultdict(NodeCoverage) # type: Dict[Text, NodeCoverage]
def add(self, model): # type: (onnx.ModelProto) -> None
assert self.name in [None, model.graph.name]
if self.name is None:
self.name = model.graph.name
assert self.name is not None
self.graph = model.graph
for node in model.graph.node:
self.node_coverages[node.op_type].add(node)
class Coverage(object):
def __init__(self): # type: () -> None
self.buckets = {
'loaded': defaultdict(NodeCoverage),
'passed': defaultdict(NodeCoverage),
} # type: Dict[Text, Dict[Text, NodeCoverage]]
self.models = {
'loaded': defaultdict(ModelCoverage),
'passed': defaultdict(ModelCoverage),
} # type: Dict[Text, Dict[Text, ModelCoverage]]
def add_node(self, node, bucket): # type: (onnx.NodeProto, Text) -> None
self.buckets[bucket][node.op_type].add(node)
def add_graph(self, graph, bucket): # type: (onnx.GraphProto, Text) -> None
for node in graph.node:
self.add_node(node, bucket)
def add_model(self, model, bucket, is_model): # type: (onnx.ModelProto, Text, bool) -> None
self.add_graph(model.graph, bucket)
# Only track model-level coverage for real models (is_model comes from the 'RealModel' marker arg)
if is_model:
self.models[bucket][model.graph.name].add(model)
def add_proto(self, proto, bucket, is_model): # type: (onnx.ModelProto, Text, bool) -> None
assert isinstance(proto, onnx.ModelProto)
self.add_model(proto, bucket, is_model)
def report_text(self, writer): # type: (IO[Text]) -> None
writer.write('---------- onnx coverage: ----------\n')
writer.write('Operators (passed/loaded/total): {}/{}/{}\n'.format(
len(self.buckets['passed']),
len(self.buckets['loaded']),
len(_all_schemas)))
writer.write('------------------------------------\n')
rows = []
passed = []
all_ops = [] # type: List[Text]
experimental = [] # type: List[Text]
for op_cov in self.buckets['passed'].values():
covered_attrs = [
'{}: {}'.format(attr_cov.name, len(attr_cov.values))
for attr_cov in op_cov.attr_coverages.values()]
uncovered_attrs = [
'{}: 0'.format(attr)
for attr in op_cov.schema.attributes
if attr not in op_cov.attr_coverages
]
attrs = sorted(covered_attrs) + sorted(uncovered_attrs)
if attrs:
attrs_column = os.linesep.join(attrs)
else:
attrs_column = 'No attributes'
rows.append([op_cov.op_type, attrs_column])
passed.append(op_cov.op_type)
writer.write(tabulate(
rows,
headers=['Operator', 'Attributes\n(name: #values)'],
tablefmt='plain'))
if os.environ.get(str('CSVDIR')) is not None:
self.report_csv(all_ops, passed, experimental)
# This function writes the coverage report to a set of CSV files for
# the Backend Scoreboard (onnx.ai/backend-scoreboard). To enable this
# feature, set a CSVDIR environment variable locally with the directory
# where you would like the files to be written, relative to the
# directory from which you're running pytest. The format of the CSV
# files is a column naming each op or model and columns for each
# backend with indications of whether the tests passed or failed for
# each row.
def report_csv(self, all_ops, passed, experimental): # type: (List[Text], List[Optional[Text]], List[Text]) -> None
for schema in _all_schemas:
if schema.domain == '' or schema.domain == 'ai.onnx':
all_ops.append(schema.name)
if schema.support_level == defs.OpSchema.SupportType.EXPERIMENTAL:
experimental.append(schema.name)
all_ops.sort()
nodes_path = os.path.join(str(os.environ.get('CSVDIR')), # type: ignore
'nodes.csv') # type: ignore
models_path = os.path.join(str(os.environ.get('CSVDIR')), # type: ignore
'models.csv') # type: ignore
existing_nodes = OrderedDict() # type: OrderedDict[Text, Dict[str, str]]
existing_models = OrderedDict() # type: OrderedDict[Text, Dict[str, str]]
frameworks = [] # type: List[str]
if os.path.isfile(nodes_path):
with open(nodes_path, 'r') as nodes_file:
reader = csv.DictReader(nodes_file)
frameworks = list(reader.fieldnames)
for row in reader:
op = row[str('Op')]
del row[str('Op')]
existing_nodes[str(op)] = row
if os.path.isfile(models_path):
with open(models_path, 'r') as models_file:
reader = csv.DictReader(models_file)
for row in reader:
model = row[str('Model')]
del row[str('Model')]
existing_models[str(model)] = row
backend = os.environ.get(str('BACKEND'))
other_frameworks = frameworks[1:]
with open(nodes_path, 'w') as nodes_file:
if str('Op') not in frameworks:
frameworks.append(str('Op'))
if backend not in frameworks:
frameworks.append(str(backend))
else:
other_frameworks.remove(str(backend))
node_writer = csv.DictWriter(nodes_file, fieldnames=frameworks)
node_writer.writeheader()
for node in all_ops:
node_name = node
if node in experimental:
node_name = node + ' (Experimental)'
if node_name not in existing_nodes:
# Also add Skipped for other nodes
existing_nodes[node_name] = OrderedDict()
for other_framework in other_frameworks:
existing_nodes[node_name][other_framework] = str("Skipped!")
if node in passed:
existing_nodes[node_name][str(backend)] = str("Passed!")
else:
existing_nodes[node_name][str(backend)] = str("Failed!")
summaries = dict() # type: Dict[Any, Any]
if "Summary" in existing_nodes:
summaries = existing_nodes["Summary"]
del existing_nodes["Summary"]
summaries[str(backend)] = \
"{}/{} node tests passed".format(len(passed), len(all_ops))
summaries['Op'] = 'Summary'
for node in existing_nodes:
existing_nodes[node][str('Op')] = str(node)
node_writer.writerow(existing_nodes[node])
node_writer.writerow(summaries)
with open(models_path, 'w') as models_file:
frameworks[0] = str("Model")
model_writer = csv.DictWriter(models_file, fieldnames=frameworks)
model_writer.writeheader()
# Consider both buckets
num_models = 0
for bucket in self.models:
for model in self.models[bucket]: # type: ignore
# Both analyze and run the model on the backend
num_covered = 0
for node in self.models[bucket][model].node_coverages:
if node in passed:
num_covered += 1
# TODO: Identify if there are models that are being
# skipped/not loaded, but that are in other frameworks
msg = "Passed!"
if bucket == 'loaded':
if model in self.models['passed']:
continue
msg = "Failed!"
num_models += 1
if model not in existing_models:
# Also add Skipped for other models
existing_models[model] = OrderedDict()
for other_framework in other_frameworks:
existing_models[model][other_framework] = str("Skipped!")
existing_models[model][str(backend)] = str("{}/{} nodes covered: {}"
.format(num_covered, len(self.models[bucket][model]
.node_coverages), msg))
summaries.clear()
if "Summary" in existing_models:
summaries = existing_models["Summary"]
del existing_models["Summary"]
if str(backend) in summaries:
del summaries[str(backend)]
summaries[str(backend)] = "{}/{} model tests passed" \
.format(len(self.models['passed']), num_models)
summaries['Model'] = 'Summary'
for model in existing_models: # type: ignore
existing_models[model][str('Model')] = model
model_writer.writerow(existing_models[model])
model_writer.writerow(summaries)
with open(os.path.join(str(os.environ.get('CSVDIR')), # type: ignore
'metadata.csv'), 'w') as metadata_file: # type: ignore
metadata_writer = csv.writer(metadata_file)
metadata_writer.writerow(["Latest Update", datetime.datetime.now().isoformat().replace('T', ' ')])
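# Usage sketch (added; illustrative): the CSV scoreboard output is opt-in via
# environment variables, e.g.
#   CSVDIR=reports BACKEND=my_backend python -m pytest ...
# where 'my_backend' is whatever name the backend under test reports. The
# plain-text report is always written to the pytest terminal reporter.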
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
class ReporterBase(object):
pass
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
def abs(input): # type: (np.ndarray) -> np.ndarray
return np.abs(input)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import importlib
import inspect
import sys
import pkgutil
from typing import Dict, Text
from types import ModuleType
def collect_sample_implementations(): # type: () -> Dict[Text, Text]
sources = {} # type: Dict[Text, Text]
_recursive_scan(sys.modules[__name__], sources)
return sources
def _recursive_scan(package, sources): # type: (ModuleType, Dict[Text, Text]) -> None
pkg_dir = package.__path__ # type: ignore
module_location = package.__name__
for _module_loader, name, ispkg in pkgutil.iter_modules(pkg_dir): # type: ignore
module_name = "{}.{}".format(module_location, name) # Module/package
module = importlib.import_module(module_name)
sources[name] = inspect.getsource(module)
if ispkg:
_recursive_scan(module, sources)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from onnx import defs
def main(): # type: () -> None
with_inference = []
without_inference = []
for schema in defs.get_all_schemas():
domain, name, has_inference = schema.domain, schema.name, schema.has_type_and_shape_inference_function
if has_inference:
with_inference.append((domain, name))
else:
without_inference.append((domain, name))
print(len(with_inference), 'operators have a type/shape inference function.')
print(len(without_inference), 'do not. These are:')
for domain, name in sorted(without_inference):
print(domain, name)
if __name__ == '__main__':
main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from onnx import AttributeProto, FunctionProto
import onnx.onnx_cpp2py_export.defs as C
from collections import defaultdict
from typing import List, Dict
ONNX_DOMAIN = ""
ONNX_ML_DOMAIN = 'ai.onnx.ml'
has = C.has_schema
get_schema = C.get_schema
get_all_schemas = C.get_all_schemas
get_all_schemas_with_history = C.get_all_schemas_with_history
def onnx_opset_version(): # type: () -> int
return C.schema_version_map()[ONNX_DOMAIN][1]
@property # type: ignore
def _Function_proto(self): # type: ignore
func_proto = FunctionProto()
func_proto.ParseFromString(self._function_body)
return func_proto
OpSchema = C.OpSchema # type: ignore
C.OpSchema.function_body = _Function_proto # type: ignore
@property # type: ignore
def _Attribute_default_value(self): # type: ignore
attr = AttributeProto()
attr.ParseFromString(self._default_value)
return attr
OpSchema.Attribute.default_value = _Attribute_default_value # type: ignore
def get_function_ops(): # type: () -> List[OpSchema]
schemas = C.get_all_schemas()
return [schema for schema in schemas if schema.has_function or schema.has_context_dependent_function] # type: ignore
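# Usage sketch (added; illustrative):
#   from onnx import defs
#   assert defs.has('Relu')
#   print(defs.onnx_opset_version())  # highest opset of the default domain
#   relu = defs.get_schema('Relu')
#   print(relu.domain, relu.since_version)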
|
#!/usr/bin/env python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict
import io
import os
import sys
import numpy as np # type: ignore
from onnx import defs, FunctionProto, helper, OperatorStatus
from onnx.defs import OpSchema, ONNX_DOMAIN, ONNX_ML_DOMAIN
from onnx.backend.test.case import collect_snippets
from onnx.backend.sample.ops import collect_sample_implementations
from typing import Any, Text, Sequence, Dict, List, Type, Set, Tuple
SNIPPETS = collect_snippets()
SAMPLE_IMPLEMENTATIONS = collect_sample_implementations()
ONNX_ML = os.getenv('ONNX_ML') != '0'
ext = '-ml.md' if ONNX_ML else '.md'
def display_number(v): # type: (int) -> Text
if defs.OpSchema.is_infinite(v):
return '∞'
return Text(v)
def should_render_domain(domain): # type: (Text) -> bool
if domain == ONNX_ML_DOMAIN and not ONNX_ML:
return False
if ONNX_ML and domain != ONNX_ML_DOMAIN:
return False
return True
def format_name_with_domain(domain, schema_name): # type: (Text, Text) -> Text
if domain:
return '{}.{}'.format(domain, schema_name)
return schema_name
def display_attr_type(v): # type: (OpSchema.AttrType) -> Text
assert isinstance(v, OpSchema.AttrType)
s = Text(v)
s = s[s.rfind('.') + 1:].lower()
if s[-1] == 's':
s = 'list of ' + s
return s
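# Example (added): display_attr_type(OpSchema.AttrType.INTS) -> 'list of ints';
# display_attr_type(OpSchema.AttrType.FLOAT) -> 'float'.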
def display_domain(domain): # type: (Text) -> Text
if domain:
return "the '{}' operator set".format(domain)
return "the default ONNX operator set"
def display_domain_short(domain): # type: (Text) -> Text
if domain:
return domain
return 'ai.onnx (default)'
def display_version_link(name, version): # type: (Text, int) -> Text
changelog_md = 'Changelog' + ext
name_with_ver = '{}-{}'.format(name, version)
return '<a href="{}#{}">{}</a>'.format(changelog_md, name_with_ver, name_with_ver)
def display_schema(schema, versions): # type: (OpSchema, Sequence[OpSchema]) -> Text
s = ''
# doc
if schema.doc:
s += '\n'
s += '\n'.join(' ' + line
for line in schema.doc.lstrip().splitlines())
s += '\n'
# since version
s += '\n#### Version\n'
if schema.support_level == OpSchema.SupportType.EXPERIMENTAL:
s += '\nNo versioning maintained for experimental ops.'
else:
s += '\nThis version of the operator has been ' + ('deprecated' if schema.deprecated else 'available') + ' since version {}'.format(schema.since_version)
s += ' of {}.\n'.format(display_domain(schema.domain))
if len(versions) > 1:
# TODO: link to the Changelog.md
s += '\nOther versions of this operator: {}\n'.format(
', '.join(display_version_link(format_name_with_domain(v.domain, v.name),
v.since_version) for v in versions[:-1]))
# If this schema is deprecated, don't display any of the following sections
if schema.deprecated:
return s
# attributes
if schema.attributes:
s += '\n#### Attributes\n\n'
s += '<dl>\n'
for _, attr in sorted(schema.attributes.items()):
# option holds either required or default value
opt = ''
if attr.required:
opt = 'required'
elif attr.default_value.name:
default_value = helper.get_attribute_value(attr.default_value)
def format_value(value): # type: (Any) -> Text
if isinstance(value, float):
formatted = str(np.round(value, 5))
# use default formatting, unless too long.
if (len(formatted) > 10):
formatted = str("({:e})".format(value))
return formatted
elif isinstance(value, (bytes, bytearray)) and sys.version_info[0] == 3:
return str(value.decode('utf-8'))
return str(value)
if isinstance(default_value, list):
default_value = [format_value(val) for val in default_value]
else:
default_value = format_value(default_value)
opt = 'default is {}'.format(default_value)
s += '<dt><tt>{}</tt> : {}{}</dt>\n'.format(
attr.name,
display_attr_type(attr.type),
' ({})'.format(opt) if opt else '')
s += '<dd>{}</dd>\n'.format(attr.description)
s += '</dl>\n'
# inputs
s += '\n#### Inputs'
if schema.min_input != schema.max_input:
s += ' ({} - {})'.format(display_number(schema.min_input),
display_number(schema.max_input))
s += '\n\n'
if schema.inputs:
s += '<dl>\n'
for input in schema.inputs:
option_str = ""
if OpSchema.FormalParameterOption.Optional == input.option:
option_str = " (optional)"
elif OpSchema.FormalParameterOption.Variadic == input.option:
if input.isHomogeneous:
option_str = " (variadic)"
else:
option_str = " (variadic, heterogeneous)"
s += '<dt><tt>{}</tt>{} : {}</dt>\n'.format(input.name, option_str, input.typeStr)
s += '<dd>{}</dd>\n'.format(input.description)
s += '</dl>\n'
# outputs
s += '\n#### Outputs'
if schema.min_output != schema.max_output:
s += ' ({} - {})'.format(display_number(schema.min_output),
display_number(schema.max_output))
s += '\n\n'
if schema.outputs:
s += '<dl>\n'
for output in schema.outputs:
option_str = ""
if OpSchema.FormalParameterOption.Optional == output.option:
option_str = " (optional)"
elif OpSchema.FormalParameterOption.Variadic == output.option:
if output.isHomogeneous:
option_str = " (variadic)"
else:
option_str = " (variadic, heterogeneous)"
s += '<dt><tt>{}</tt>{} : {}</dt>\n'.format(output.name, option_str, output.typeStr)
s += '<dd>{}</dd>\n'.format(output.description)
s += '</dl>\n'
# type constraints
s += '\n#### Type Constraints'
s += '\n\n'
if schema.type_constraints:
s += '<dl>\n'
for type_constraint in schema.type_constraints:
allowedTypes = type_constraint.allowed_type_strs
allowedTypeStr = ', '.join(allowedTypes)
s += '<dt><tt>{}</tt> : {}</dt>\n'.format(
type_constraint.type_param_str, allowedTypeStr)
s += '<dd>{}</dd>\n'.format(type_constraint.description)
s += '</dl>\n'
# Function Body
# TODO: this should be refactored to show the function body graph's picture (DAG).
#if schema.has_function or schema.has_context_dependent_function: # type: ignore
# s += '\n#### Function\n'
# s += '\nThe Function can be represented as a function.\n'
return s
def support_level_str(level): # type: (OpSchema.SupportType) -> Text
return \
"<sub>experimental</sub> " if level == OpSchema.SupportType.EXPERIMENTAL else ""
def main(args): # type: (Type[Args]) -> None
with io.open(args.changelog, 'w', newline='') as fout:
fout.write('## Operator Changelog\n')
fout.write(
"*This file is automatically generated from the\n"
" [def files](/onnx/defs) via [this script](/onnx/defs/gen_doc.py).\n"
" Do not modify directly and instead edit operator definitions.*\n")
# domain -> version -> [schema]
dv_index = defaultdict(lambda: defaultdict(list)) # type: Dict[Text, Dict[int, List[OpSchema]]]
for schema in defs.get_all_schemas_with_history():
dv_index[schema.domain][schema.since_version].append(schema)
fout.write('\n')
for domain, versionmap in sorted(dv_index.items()):
if not should_render_domain(domain):
continue
s = '# {}\n'.format(display_domain_short(domain))
for version, unsorted_schemas in sorted(versionmap.items()):
s += '## Version {} of {}\n'.format(version, display_domain(domain))
for schema in sorted(unsorted_schemas, key=lambda s: s.name):
name_with_ver = '{}-{}'.format(format_name_with_domain(domain, schema.name),
schema.since_version)
s += ('### <a name="{}"></a>**{}**' + (' (deprecated)' if schema.deprecated else '') + '\n').format(name_with_ver, name_with_ver)
s += display_schema(schema, [schema])
s += '\n'
fout.write(s)
with io.open(args.output, 'w', newline='', encoding="utf-8") as fout:
fout.write('## Operator Schemas\n')
fout.write(
"*This file is automatically generated from the\n"
" [def files](/onnx/defs) via [this script](/onnx/defs/gen_doc.py).\n"
" Do not modify directly and instead edit operator definitions.*\n")
# domain -> support level -> name -> [schema]
index = defaultdict(lambda: defaultdict(lambda: defaultdict(list))) # type: Dict[Text, Dict[int, Dict[Text, List[OpSchema]]]]
for schema in defs.get_all_schemas_with_history():
index[schema.domain][int(schema.support_level)][schema.name].append(schema)
fout.write('\n')
# Preprocess the Operator Schemas
# [(domain, [(support_level, [(schema name, current schema, all versions schemas)])])]
operator_schemas = list() # type: List[Tuple[Text, List[Tuple[int, List[Tuple[Text, OpSchema, List[OpSchema]]]]]]]
existing_ops = set() # type: Set[Text]
for domain, _supportmap in sorted(index.items()):
if not should_render_domain(domain):
continue
processed_supportmap = list()
for _support, _namemap in sorted(_supportmap.items()):
processed_namemap = list()
for n, unsorted_versions in sorted(_namemap.items()):
versions = sorted(unsorted_versions, key=lambda s: s.since_version)
schema = versions[-1]
if schema.name in existing_ops:
continue
existing_ops.add(schema.name)
processed_namemap.append((n, schema, versions))
processed_supportmap.append((_support, processed_namemap))
operator_schemas.append((domain, processed_supportmap))
# Table of contents
for domain, supportmap in operator_schemas:
s = '* {}\n'.format(display_domain_short(domain))
fout.write(s)
function_ops = list()
for _, namemap in supportmap:
for n, schema, versions in namemap:
if schema.has_function or schema.has_context_dependent_function: # type: ignore
function_ops.append((n, schema, versions))
continue
s = ' * {}<a href="#{}">{}</a>\n'.format(
support_level_str(schema.support_level),
format_name_with_domain(domain, n),
format_name_with_domain(domain, n))
fout.write(s)
if function_ops:
fout.write('\n')
fout.write(' **Functions**\n')
for n, schema, versions in function_ops:
s = ' * {}<a href="#{}">{}</a>\n'.format(
support_level_str(schema.support_level),
format_name_with_domain(domain, n),
format_name_with_domain(domain, n))
fout.write(s)
fout.write('\n')
for domain, supportmap in operator_schemas:
s = '## {}\n'.format(display_domain_short(domain))
fout.write(s)
for _, namemap in supportmap:
for op_type, schema, versions in namemap:
# op_type
s = ('### {}<a name="{}"></a><a name="{}">**{}**' + (' (deprecated)' if schema.deprecated else '') + '</a>\n').format(
support_level_str(schema.support_level),
format_name_with_domain(domain, op_type),
format_name_with_domain(domain, op_type.lower()),
format_name_with_domain(domain, op_type))
s += display_schema(schema, versions)
s += '\n\n'
if op_type in SNIPPETS:
s += '#### Examples\n\n'
for summary, code in sorted(SNIPPETS[op_type]):
s += '<details>\n'
s += '<summary>{}</summary>\n\n'.format(summary)
s += '```python\n{}\n```\n\n'.format(code)
s += '</details>\n'
s += '\n\n'
if op_type.lower() in SAMPLE_IMPLEMENTATIONS:
s += '#### Sample Implementation\n\n'
s += '<details>\n'
s += '<summary>{}</summary>\n\n'.format(op_type)
s += '```python\n{}\n```\n\n'.format(SAMPLE_IMPLEMENTATIONS[op_type.lower()])
s += '</details>\n'
s += '\n\n'
fout.write(s)
if __name__ == '__main__':
base_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
docs_dir = os.path.join(base_dir, 'docs')
class Args(object):
output = os.path.join(docs_dir, 'Operators' + ext)
changelog = os.path.join(docs_dir, 'Changelog' + ext)
main(Args)
|
import distutils.command.clean
import glob
import os
import shutil
import subprocess
import sys
import torch
from setuptools import find_packages, setup
from torch.utils.cpp_extension import (
BuildExtension,
CppExtension,
CUDA_HOME,
CUDAExtension,
)
with open("version.txt", "r") as f:
version = f.read().strip()
sha = "Unknown"
package_name = "torchcsprng"
cwd = os.path.dirname(os.path.abspath(__file__))
try:
sha = (
subprocess.check_output(["git", "rev-parse", "HEAD"], cwd=cwd)
.decode("ascii")
.strip()
)
except Exception:
pass
if os.getenv("BUILD_VERSION"):
version = os.getenv("BUILD_VERSION")
elif sha != "Unknown":
version += "+" + sha[:7]
print("Building wheel {}-{}".format(package_name, version))
def write_version_file():
version_path = os.path.join(cwd, "torchcsprng", "version.py")
with open(version_path, "w") as f:
f.write("__version__ = '{}'\n".format(version))
f.write("git_version = {}\n".format(repr(sha)))
# f.write("from torchcsprng.extension import _check_cuda_version\n")
# f.write("if _check_cuda_version() > 0:\n")
# f.write(" cuda = _check_cuda_version()\n")
write_version_file()
with open("README.md", "r") as fh:
long_description = fh.read()
requirements = [
"torch",
]
def append_flags(flags, flags_to_append):
for flag in flags_to_append:
if flag not in flags:
flags.append(flag)
return flags
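# Example (added): append_flags(["-O2"], ["-fopenmp", "-O2"]) returns
# ["-O2", "-fopenmp"] -- flags are appended (in place) only when not already present.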
def get_extensions():
build_cuda = torch.cuda.is_available() or os.getenv("FORCE_CUDA", "0") == "1"
module_name = "torchcsprng"
extensions_dir = os.path.join(cwd, module_name, "csrc")
openmp = "ATen parallel backend: OpenMP" in torch.__config__.parallel_info()
main_file = glob.glob(os.path.join(extensions_dir, "*.cpp"))
source_cpu = glob.glob(os.path.join(extensions_dir, "cpu", "*.cpp"))
sources = main_file + source_cpu
extension = CppExtension
define_macros = []
cxx_flags = os.getenv("CXX_FLAGS", "")
if cxx_flags == "":
cxx_flags = []
else:
cxx_flags = cxx_flags.split(" ")
if openmp:
if sys.platform == "linux":
cxx_flags = append_flags(cxx_flags, ["-fopenmp"])
elif sys.platform == "win32":
cxx_flags = append_flags(cxx_flags, ["/openmp"])
# elif sys.platform == 'darwin':
# cxx_flags = append_flags(cxx_flags, ['-Xpreprocessor', '-fopenmp'])
if build_cuda:
extension = CUDAExtension
source_cuda = glob.glob(os.path.join(extensions_dir, "cuda", "*.cu"))
sources += source_cuda
define_macros += [("WITH_CUDA", None)]
nvcc_flags = os.getenv("NVCC_FLAGS", "")
if nvcc_flags == "":
nvcc_flags = []
else:
nvcc_flags = nvcc_flags.split(" ")
nvcc_flags = append_flags(nvcc_flags, ["--expt-extended-lambda", "-Xcompiler"])
extra_compile_args = {
"cxx": cxx_flags,
"nvcc": nvcc_flags,
}
else:
extra_compile_args = {
"cxx": cxx_flags,
}
ext_modules = [
extension(
module_name + "._C",
sources,
define_macros=define_macros,
extra_compile_args=extra_compile_args,
)
]
return ext_modules
class clean(distutils.command.clean.clean):
def run(self):
with open(".gitignore", "r") as f:
ignores = f.read()
start_deleting = False
for wildcard in filter(None, ignores.split("\n")):
if (
wildcard
== "# do not change or delete this comment - `python setup.py clean` deletes everything after this line"
):
start_deleting = True
if not start_deleting:
continue
for filename in glob.glob(wildcard):
try:
os.remove(filename)
except OSError:
shutil.rmtree(filename, ignore_errors=True)
# It's an old-style class in Python 2.7...
distutils.command.clean.clean.run(self)
setup(
# Metadata
name=package_name,
version=version,
author="Pavel Belevich",
author_email="[email protected]",
url="https://github.com/pytorch/csprng",
description="Cryptographically secure pseudorandom number generators for PyTorch",
long_description=long_description,
long_description_content_type="text/markdown",
license="BSD-3",
# Package info
packages=find_packages(exclude=("test",)),
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Education",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: BSD License",
"Programming Language :: C++",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Topic :: Scientific/Engineering",
"Topic :: Scientific/Engineering :: Mathematics",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development",
"Topic :: Software Development :: Libraries",
"Topic :: Software Development :: Libraries :: Python Modules",
],
python_requires=">=3.6",
install_requires=requirements,
ext_modules=get_extensions(),
test_suite="test",
cmdclass={
"build_ext": BuildExtension,
"clean": clean,
},
)
|
# -*- coding: utf-8 -*-
"""Helper script to package wheels and relocate binaries."""
import glob
import hashlib
import io
# Standard library imports
import os
import os.path as osp
import platform
import shutil
import subprocess
import sys
import zipfile
from base64 import urlsafe_b64encode
# Third party imports
if sys.platform == "linux":
from auditwheel.lddtree import lddtree
from wheel.bdist_wheel import get_abi_tag
ALLOWLIST = {
"libgcc_s.so.1",
"libstdc++.so.6",
"libm.so.6",
"libdl.so.2",
"librt.so.1",
"libc.so.6",
"libnsl.so.1",
"libutil.so.1",
"libpthread.so.0",
"libresolv.so.2",
"libX11.so.6",
"libXext.so.6",
"libXrender.so.1",
"libICE.so.6",
"libSM.so.6",
"libGL.so.1",
"libgobject-2.0.so.0",
"libgthread-2.0.so.0",
"libglib-2.0.so.0",
"ld-linux-x86-64.so.2",
"ld-2.17.so",
}
WINDOWS_ALLOWLIST = {
"MSVCP140.dll",
"KERNEL32.dll",
"VCRUNTIME140_1.dll",
"VCRUNTIME140.dll",
"api-ms-win-crt-heap-l1-1-0.dll",
"api-ms-win-crt-runtime-l1-1-0.dll",
"api-ms-win-crt-stdio-l1-1-0.dll",
"api-ms-win-crt-filesystem-l1-1-0.dll",
"api-ms-win-crt-string-l1-1-0.dll",
"api-ms-win-crt-environment-l1-1-0.dll",
"api-ms-win-crt-math-l1-1-0.dll",
"api-ms-win-crt-convert-l1-1-0.dll",
}
HERE = osp.dirname(osp.abspath(__file__))
PACKAGE_ROOT = osp.dirname(osp.dirname(HERE))
PLATFORM_ARCH = platform.machine()
PYTHON_VERSION = sys.version_info
def read_chunks(file, size=io.DEFAULT_BUFFER_SIZE):
"""Yield pieces of data from a file-like object until EOF."""
while True:
chunk = file.read(size)
if not chunk:
break
yield chunk
def rehash(path, blocksize=1 << 20):
"""Return (hash, length) for path using hashlib.sha256()"""
h = hashlib.sha256()
length = 0
with open(path, "rb") as f:
for block in read_chunks(f, size=blocksize):
length += len(block)
h.update(block)
digest = "sha256=" + urlsafe_b64encode(h.digest()).decode("latin1").rstrip("=")
# unicode/str python2 issues
return (digest, str(length)) # type: ignore
def unzip_file(file, dest):
"""Decompress zip `file` into directory `dest`."""
with zipfile.ZipFile(file, "r") as zip_ref:
zip_ref.extractall(dest)
def is_program_installed(basename):
"""
Return program absolute path if installed in PATH.
Otherwise, return None
On macOS systems, a .app is considered installed if
it exists.
"""
if sys.platform == "darwin" and basename.endswith(".app") and osp.exists(basename):
return basename
for path in os.environ["PATH"].split(os.pathsep):
abspath = osp.join(path, basename)
if osp.isfile(abspath):
return abspath
def find_program(basename):
"""
Find program in PATH and return absolute path
Try adding .exe or .bat to basename on Windows platforms
(return None if not found)
"""
names = [basename]
if os.name == "nt":
# Windows platforms
extensions = (".exe", ".bat", ".cmd", ".dll")
if not basename.endswith(extensions):
names = [basename + ext for ext in extensions] + [basename]
for name in names:
path = is_program_installed(name)
if path:
return path
def patch_new_path(library_path, new_dir):
library = osp.basename(library_path)
name, *rest = library.split(".")
rest = ".".join(rest)
hash_id = hashlib.sha256(library_path.encode("utf-8")).hexdigest()[:8]
new_name = ".".join([name, hash_id, rest])
return osp.join(new_dir, new_name)
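# Example (added): patch_new_path("/usr/lib/libfoo.so.1", "wheel/torchcsprng.libs")
# yields something like "wheel/torchcsprng.libs/libfoo.ab12cd34.so.1": an
# 8-character sha256 prefix of the source path is spliced into the name to
# avoid collisions between same-named libraries from different directories.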
def find_dll_dependencies(dumpbin, binary):
out = subprocess.run([dumpbin, "/dependents", binary], stdout=subprocess.PIPE)
out = out.stdout.strip().decode("utf-8")
start_index = out.find("dependencies:") + len("dependencies:")
end_index = out.find("Summary")
dlls = out[start_index:end_index].strip()
dlls = dlls.split(os.linesep)
dlls = [dll.strip() for dll in dlls]
return dlls
def relocate_elf_library(patchelf, output_dir, output_library, binary):
"""
Relocate an ELF shared library to be packaged on a wheel.
Given a shared library, find the transitive closure of its dependencies,
rename and copy them into the wheel while updating their respective rpaths.
"""
print("Relocating {0}".format(binary))
binary_path = osp.join(output_library, binary)
ld_tree = lddtree(binary_path)
tree_libs = ld_tree["libs"]
binary_queue = [(n, binary) for n in ld_tree["needed"]]
binary_paths = {binary: binary_path}
binary_dependencies = {}
while binary_queue:
library, parent = binary_queue.pop(0)
library_info = tree_libs[library]
print(library)
if library_info["path"] is None:
print("Omitting {0}".format(library))
continue
if library in ALLOWLIST:
# Omit glibc/gcc/system libraries
print("Omitting {0}".format(library))
continue
parent_dependencies = binary_dependencies.get(parent, [])
parent_dependencies.append(library)
binary_dependencies[parent] = parent_dependencies
if library in binary_paths:
continue
binary_paths[library] = library_info["path"]
binary_queue += [(n, library) for n in library_info["needed"]]
print("Copying dependencies to wheel directory")
new_libraries_path = osp.join(output_dir, "torchcsprng.libs")
os.makedirs(new_libraries_path)
new_names = {binary: binary_path}
for library in binary_paths:
if library != binary:
library_path = binary_paths[library]
new_library_path = patch_new_path(library_path, new_libraries_path)
print("{0} -> {1}".format(library, new_library_path))
shutil.copyfile(library_path, new_library_path)
new_names[library] = new_library_path
print("Updating dependency names by new files")
for library in binary_paths:
if library != binary:
if library not in binary_dependencies:
continue
library_dependencies = binary_dependencies[library]
new_library_name = new_names[library]
for dep in library_dependencies:
new_dep = osp.basename(new_names[dep])
print("{0}: {1} -> {2}".format(library, dep, new_dep))
subprocess.check_output(
[patchelf, "--replace-needed", dep, new_dep, new_library_name],
cwd=new_libraries_path,
)
print("Updating library rpath")
subprocess.check_output(
[patchelf, "--set-rpath", "$ORIGIN", new_library_name],
cwd=new_libraries_path,
)
subprocess.check_output(
[patchelf, "--print-rpath", new_library_name], cwd=new_libraries_path
)
print("Update library dependencies")
library_dependencies = binary_dependencies[binary]
for dep in library_dependencies:
new_dep = osp.basename(new_names[dep])
print("{0}: {1} -> {2}".format(binary, dep, new_dep))
subprocess.check_output(
[patchelf, "--replace-needed", dep, new_dep, binary], cwd=output_library
)
print("Update library rpath")
subprocess.check_output(
[patchelf, "--set-rpath", "$ORIGIN:$ORIGIN/../torchcsprng.libs", binary_path],
cwd=output_library,
)
def relocate_dll_library(dumpbin, output_dir, output_library, binary):
"""
Relocate a DLL/PE shared library to be packaged on a wheel.
Given a shared library, find the transitive closure of its dependencies,
rename and copy them into the wheel.
"""
print("Relocating {0}".format(binary))
binary_path = osp.join(output_library, binary)
library_dlls = find_dll_dependencies(dumpbin, binary_path)
binary_queue = [(dll, binary) for dll in library_dlls]
binary_paths = {binary: binary_path}
binary_dependencies = {}
    while binary_queue:
library, parent = binary_queue.pop(0)
if library in WINDOWS_ALLOWLIST or library.startswith("api-ms-win"):
print("Omitting {0}".format(library))
continue
library_path = find_program(library)
if library_path is None:
print("{0} not found".format(library))
continue
if osp.basename(osp.dirname(library_path)) == "system32":
continue
print("{0}: {1}".format(library, library_path))
parent_dependencies = binary_dependencies.get(parent, [])
parent_dependencies.append(library)
binary_dependencies[parent] = parent_dependencies
if library in binary_paths:
continue
binary_paths[library] = library_path
downstream_dlls = find_dll_dependencies(dumpbin, library_path)
binary_queue += [(n, library) for n in downstream_dlls]
print("Copying dependencies to wheel directory")
package_dir = osp.join(output_dir, "torchcsprng")
for library in binary_paths:
if library != binary:
library_path = binary_paths[library]
new_library_path = osp.join(package_dir, library)
print("{0} -> {1}".format(library, new_library_path))
shutil.copyfile(library_path, new_library_path)
def compress_wheel(output_dir, wheel, wheel_dir, wheel_name):
"""Create RECORD file and compress wheel distribution."""
print("Update RECORD file in wheel")
dist_info = glob.glob(osp.join(output_dir, "*.dist-info"))[0]
record_file = osp.join(dist_info, "RECORD")
with open(record_file, "w") as f:
for root, _, files in os.walk(output_dir):
for this_file in files:
full_file = osp.join(root, this_file)
rel_file = osp.relpath(full_file, output_dir)
if full_file == record_file:
f.write("{0},,\n".format(rel_file))
else:
digest, size = rehash(full_file)
f.write("{0},{1},{2}\n".format(rel_file, digest, size))
print("Compressing wheel")
base_wheel_name = osp.join(wheel_dir, wheel_name)
shutil.make_archive(base_wheel_name, "zip", output_dir)
os.remove(wheel)
shutil.move("{0}.zip".format(base_wheel_name), wheel)
shutil.rmtree(output_dir)
def patch_linux():
# Get patchelf location
patchelf = find_program("patchelf")
if patchelf is None:
        raise FileNotFoundError(
            "Patchelf was not found on the system, please"
            " make sure that it is available on the PATH."
        )
# Find wheel
print("Finding wheels...")
wheels = glob.glob(osp.join(PACKAGE_ROOT, "dist", "*.whl"))
output_dir = osp.join(PACKAGE_ROOT, "dist", ".wheel-process")
image_binary = "image.so"
video_binary = "video_reader.so"
torchcsprng_binaries = [image_binary, video_binary]
for wheel in wheels:
if osp.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
print("Unzipping wheel...")
wheel_file = osp.basename(wheel)
wheel_dir = osp.dirname(wheel)
print("{0}".format(wheel_file))
wheel_name, _ = osp.splitext(wheel_file)
unzip_file(wheel, output_dir)
print("Finding ELF dependencies...")
output_library = osp.join(output_dir, "torchcsprng")
for binary in torchcsprng_binaries:
if osp.exists(osp.join(output_library, binary)):
relocate_elf_library(patchelf, output_dir, output_library, binary)
compress_wheel(output_dir, wheel, wheel_dir, wheel_name)
def patch_win():
# Get dumpbin location
dumpbin = find_program("dumpbin")
if dumpbin is None:
        raise FileNotFoundError(
            "Dumpbin was not found on the system, please"
            " make sure that it is available on the PATH."
        )
# Find wheel
print("Finding wheels...")
wheels = glob.glob(osp.join(PACKAGE_ROOT, "dist", "*.whl"))
output_dir = osp.join(PACKAGE_ROOT, "dist", ".wheel-process")
image_binary = "image.pyd"
video_binary = "video_reader.pyd"
torchcsprng_binaries = [image_binary, video_binary]
for wheel in wheels:
if osp.exists(output_dir):
shutil.rmtree(output_dir)
os.makedirs(output_dir)
print("Unzipping wheel...")
wheel_file = osp.basename(wheel)
wheel_dir = osp.dirname(wheel)
print("{0}".format(wheel_file))
wheel_name, _ = osp.splitext(wheel_file)
unzip_file(wheel, output_dir)
print("Finding DLL/PE dependencies...")
output_library = osp.join(output_dir, "torchcsprng")
for binary in torchcsprng_binaries:
if osp.exists(osp.join(output_library, binary)):
relocate_dll_library(dumpbin, output_dir, output_library, binary)
compress_wheel(output_dir, wheel, wheel_dir, wheel_name)
if __name__ == "__main__":
if sys.platform == "linux":
patch_linux()
elif sys.platform == "win32":
patch_win()
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import math
import os
import random
import time
import unittest
import numpy as np
import torch
from Crypto.Cipher import AES
from Crypto.Util import Counter
from scipy import stats
try:
import torchcsprng as csprng
except ImportError:
raise RuntimeError("CSPRNG not available")
IS_SANDCASTLE = (
os.getenv("SANDCASTLE") == "1" or os.getenv("TW_JOB_USER") == "sandcastle"
)
IS_FBCODE = os.getenv("PYTORCH_TEST_FBCODE") == "1"
def to_numpy(t, dtype=torch.float):
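    # numpy cannot represent bfloat16, so upcast to a regular float dtype first.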
if t.dtype == torch.bfloat16:
t = t.to(dtype)
return t.numpy()
def to_bytes(t):
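    # View the tensor's data as raw int8 bytes; bfloat16 is reinterpreted as
    # int16 first since numpy has no bfloat16 dtype.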
if t.dtype == torch.bfloat16:
t = t.view(torch.int16)
return t.cpu().numpy().view(np.int8)
class TestCSPRNG(unittest.TestCase):
all_generators = [
csprng.create_random_device_generator(),
csprng.create_random_device_generator("/dev/urandom"),
csprng.create_mt19937_generator(),
csprng.create_mt19937_generator(42),
]
int_dtypes = [torch.uint8, torch.int8, torch.int16, torch.int32, torch.int64]
standard_fp_dtypes = [torch.float, torch.double]
non_standard_fp_dtypes = [torch.half, torch.bfloat16]
fp_dtypes = standard_fp_dtypes + non_standard_fp_dtypes
num_dtypes = int_dtypes + fp_dtypes
all_dtypes = num_dtypes + [torch.bool]
size = 1000
all_devices = (
["cpu", "cuda"]
if (torch.cuda.is_available() and csprng.supports_cuda())
else ["cpu"]
)
def test_random_kstest(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.num_dtypes:
if dtype == torch.float:
to_inc = 2**24
elif dtype == torch.double:
to_inc = 2**53
elif dtype == torch.half:
to_inc = 2**11
elif dtype == torch.bfloat16:
to_inc = 2**8
else:
to_inc = torch.iinfo(dtype).max
t = torch.empty(self.size, dtype=dtype, device=device).random_(
generator=gen
)
res = stats.kstest(
to_numpy(t.cpu()), stats.randint.cdf, args=(0, to_inc)
)
self.assertTrue(res.statistic < 0.1)
no_cuda = not torch.cuda.is_available() or not csprng.supports_cuda()
no_cuda_message = (
"CUDA is not available or csprng was not compiled with CUDA support"
)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_random_cpu_vs_cuda(self):
for dtype in self.num_dtypes:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(self.size, dtype=dtype, device="cpu").random_(
generator=gen
)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(self.size, dtype=dtype, device="cuda").random_(
generator=gen
)
self.assertTrue((cpu_t == cuda_t.cpu()).all())
def test_random_to_kstest(self):
to_ = 42
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.num_dtypes:
t = torch.zeros(self.size, dtype=dtype, device=device).random_(
to_, generator=gen
)
res = stats.kstest(
to_numpy(t.cpu()), stats.randint.cdf, args=(0, to_)
)
self.assertTrue(res.statistic < 0.1)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_random_to_cpu_vs_cuda(self):
to_ = 42
for dtype in self.num_dtypes:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.zeros(self.size, dtype=dtype, device="cpu").random_(
to_, generator=gen
)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.zeros(self.size, dtype=dtype, device="cuda").random_(
to_, generator=gen
)
self.assertTrue((cpu_t == cuda_t.cpu()).all())
def test_random_from_to_kstest(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.num_dtypes:
for from_ in [0, 24, 42]:
for to_ in [42, 99, 123]:
if from_ < to_:
t = torch.zeros(
self.size, dtype=dtype, device=device
).random_(from_, to_, generator=gen)
res = stats.kstest(
to_numpy(t.cpu()),
stats.randint.cdf,
args=(from_, to_),
)
self.assertTrue(res.statistic < 0.2)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_random_from_to_cpu_vs_cuda(self):
for dtype in self.num_dtypes:
for from_ in [0, 24, 42]:
for to_ in [42, 99, 123]:
if from_ < to_:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.zeros(
self.size, dtype=dtype, device="cpu"
).random_(from_, to_, generator=gen)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.zeros(
self.size, dtype=dtype, device="cuda"
).random_(from_, to_, generator=gen)
self.assertTrue((cpu_t == cuda_t.cpu()).all())
def test_random_bool(self):
for device in self.all_devices:
for gen in self.all_generators:
t = torch.empty(self.size, dtype=torch.bool, device=device)
t.fill_(False)
t.random_(generator=gen)
self.assertEqual(t.min(), False)
self.assertEqual(t.max(), True)
self.assertTrue(
0.4 < (t.eq(True)).to(torch.int).sum().item() / self.size < 0.6
)
t.fill_(True)
t.random_(generator=gen)
self.assertEqual(t.min(), False)
self.assertEqual(t.max(), True)
self.assertTrue(
0.4 < (t.eq(True)).to(torch.int).sum().item() / self.size < 0.6
)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_random_bool_cpu_vs_cuda(self):
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(self.size, dtype=torch.bool, device="cpu").random_(
generator=gen
)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(self.size, dtype=torch.bool, device="cuda").random_(
generator=gen
)
self.assertTrue((cpu_t == cuda_t.cpu()).all())
def test_uniform_kstest(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.fp_dtypes:
for from_ in [-42, 0, 4.2]:
for to_ in [-4.2, 0, 42]:
if to_ > from_:
t = torch.empty(
self.size, dtype=dtype, device=device
).uniform_(from_, to_, generator=gen)
res = stats.kstest(
to_numpy(t.cpu(), torch.double),
"uniform",
args=(from_, (to_ - from_)),
)
self.assertTrue(res.statistic < 0.1)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_uniform_cpu_vs_cuda(self):
for dtype in self.fp_dtypes:
for from_ in [-42, 0, 4.2]:
for to_ in [-4.2, 0, 42]:
if to_ > from_:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(
self.size, dtype=dtype, device="cpu"
).uniform_(from_, to_, generator=gen)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(
self.size, dtype=dtype, device="cuda"
).uniform_(from_, to_, generator=gen)
self.assertTrue(torch.allclose(cpu_t, cuda_t.cpu(), 1e-9))
def test_normal_kstest(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.fp_dtypes:
for mean in [-3, 0, 7]:
for std in [1, 5, 7]:
t = torch.empty(
self.size, dtype=dtype, device=device
).normal_(mean=mean, std=std, generator=gen)
res = stats.kstest(
to_numpy(t.cpu(), torch.double),
"norm",
args=(mean, std),
)
self.assertTrue(res.statistic < 0.1)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_normal_cpu_vs_cuda(self):
for dtype in self.fp_dtypes:
for mean in [-3, 0, 7]:
for std in [1, 5, 7]:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(self.size, dtype=dtype, device="cpu").normal_(
mean=mean, std=std, generator=gen
)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(self.size, dtype=dtype, device="cuda").normal_(
mean=mean, std=std, generator=gen
)
self.assertTrue(torch.allclose(cpu_t, cuda_t.cpu(), 1e-9))
def test_log_normal_kstest(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.fp_dtypes:
for mean in [-3, 0, 7]:
for std in [1, 5, 7]:
t = torch.empty(
self.size, dtype=dtype, device=device
).log_normal_(mean=mean, std=std, generator=gen)
res = stats.kstest(
to_numpy(t.cpu(), torch.double),
"lognorm",
args=(std, 0, math.exp(mean)),
)
if dtype in [torch.half, torch.bfloat16]:
self.assertTrue(res.statistic < 0.4)
else:
self.assertTrue(res.statistic < 0.1)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_log_normal_cpu_vs_cuda(self):
for dtype in self.fp_dtypes:
for mean in [-3, 0, 7]:
for std in [1, 5, 7]:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(
self.size, dtype=dtype, device="cpu"
).log_normal_(mean=mean, std=std, generator=gen)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(
self.size, dtype=dtype, device="cuda"
).log_normal_(mean=mean, std=std, generator=gen)
self.assertTrue(
torch.allclose(cpu_t, cuda_t.cpu(), 1e-4, equal_nan=True)
)
def test_exponential_kstest(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.fp_dtypes:
for lambd in [0.5, 1.0, 5.0]:
t = torch.empty(
self.size, dtype=dtype, device=device
).exponential_(lambd=lambd, generator=gen)
res = stats.kstest(
to_numpy(t.cpu(), torch.double),
"expon",
args=(
0,
1 / lambd,
),
)
self.assertTrue(res.statistic < 0.1)
@unittest.skipIf(no_cuda, no_cuda_message)
@unittest.skip("https://github.com/pytorch/pytorch/issues/38662")
def test_exponential_cpu_vs_cuda(self):
for dtype in self.fp_dtypes:
for lambd in [0.5, 1.0, 5.0]:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(self.size, dtype=dtype, device="cpu").exponential_(
lambd=lambd, generator=gen
)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(
self.size, dtype=dtype, device="cuda"
).exponential_(lambd=lambd, generator=gen)
self.assertTrue(torch.allclose(cpu_t, cuda_t.cpu(), 1e-9))
def test_cauchy_kstest(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.fp_dtypes:
for median in [-10, 0, 50]:
for sigma in [0.5, 1.0, 10.0]:
t = torch.empty(
self.size, dtype=dtype, device=device
).cauchy_(median=median, sigma=sigma, generator=gen)
res = stats.kstest(
to_numpy(t.cpu(), torch.double),
"cauchy",
args=(median, sigma),
)
if dtype in [torch.half, torch.bfloat16]:
self.assertTrue(res.statistic < 0.4)
else:
self.assertTrue(res.statistic < 0.1)
@unittest.skipIf(no_cuda, no_cuda_message)
def test_cauchy_cpu_vs_cuda(self):
for dtype in self.fp_dtypes:
for median in [-10, 0, 50]:
for sigma in [0.5, 1.0, 10.0]:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(self.size, dtype=dtype, device="cpu").cauchy_(
median=median, sigma=sigma, generator=gen
)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(self.size, dtype=dtype, device="cuda").cauchy_(
median=median, sigma=sigma, generator=gen
)
self.assertTrue(torch.allclose(cpu_t, cuda_t.cpu(), 1e-9))
def test_geometric(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.fp_dtypes:
for p in [0.2, 0.5, 0.8]:
t = torch.empty(
self.size, dtype=dtype, device=device
).geometric_(p=p, generator=gen)
# actual = np.histogram(t.cpu().to(torch.double), np.arange(1, 100))[0]
# expected = stats.geom(p).pmf(np.arange(1, 99)) * self.size
# res = stats.chisquare(actual, expected)
# self.assertAlmostEqual(res.pvalue, 1.0, delta=0.5) TODO https://github.com/pytorch/csprng/issues/7
@unittest.skipIf(no_cuda, no_cuda_message)
def test_geometric_cpu_vs_cuda(self):
for dtype in self.fp_dtypes:
for p in [0.2, 0.5, 0.8]:
gen = csprng.create_mt19937_generator(42)
cpu_t = torch.empty(self.size, dtype=dtype, device="cpu").geometric_(
p=p, generator=gen
)
gen = csprng.create_mt19937_generator(42)
cuda_t = torch.empty(self.size, dtype=dtype, device="cuda").geometric_(
p=p, generator=gen
)
self.assertTrue(
torch.allclose(cpu_t, cuda_t.cpu(), 1e-9, equal_nan=True)
)
def test_non_contiguous_vs_contiguous(self):
size = 10
for device in self.all_devices:
for dtype in self.all_dtypes:
for i in range(10):
t = torch.zeros([size, size, size], dtype=dtype, device=device)
x1 = random.randrange(0, size)
y1 = random.randrange(0, size)
z1 = random.randrange(0, size)
x2 = random.randrange(x1 + 1, max(x1 + 2, size))
y2 = random.randrange(y1 + 1, max(y1 + 2, size))
z2 = random.randrange(z1 + 1, max(z1 + 2, size))
maybe_non_contiguous = t[x1:x2, y1:y2, z1:z2]
assert maybe_non_contiguous.numel() > 0
if not maybe_non_contiguous.is_contiguous():
seed = random.randrange(1000)
non_contiguous = maybe_non_contiguous
gen = csprng.create_mt19937_generator(seed)
non_contiguous.random_(generator=gen)
contiguous = torch.zeros_like(non_contiguous)
gen = csprng.create_mt19937_generator(seed)
contiguous.random_(generator=gen)
assert contiguous.is_contiguous()
self.assertTrue((non_contiguous == contiguous).all())
for x in range(0, size):
for y in range(0, size):
for z in range(0, size):
if (
not x1 <= x < x2
and not y1 <= y < y2
and not z1 <= z < z2
):
self.assertTrue(t[x, y, z] == 0)
@unittest.skipIf(IS_SANDCASTLE or IS_FBCODE, "Does not work on Sandcastle")
@unittest.skipIf(torch.get_num_threads() < 2, "requires multithreading CPU")
def test_cpu_parallel(self):
urandom_gen = csprng.create_random_device_generator("/dev/urandom")
def measure(size):
t = torch.empty(size, dtype=torch.float32, device="cpu")
start = time.time()
for i in range(20):
t.normal_(generator=urandom_gen)
finish = time.time()
return finish - start
time_for_1K = measure(1000)
time_for_1M = measure(1000000)
# Pessimistic check that parallel execution gives >= 1.5 performance boost
self.assertTrue(time_for_1M / time_for_1K < 1000 / 1.5)
@unittest.skipIf(IS_SANDCASTLE or IS_FBCODE, "Does not work on Sandcastle")
def test_version(self):
self.assertTrue(csprng.__version__)
self.assertTrue(csprng.git_version)
def test_randperm(self):
for device in self.all_devices:
for gen in self.all_generators:
for dtype in self.int_dtypes:
for size in range(0, 20):
expected = torch.arange(size, dtype=dtype, device=device)
actual = torch.randperm(
size, dtype=dtype, device=device, generator=gen
)
actual_out = torch.empty(1, dtype=dtype, device=device)
torch.randperm(size, out=actual_out, generator=gen)
if size >= 10:
self.assertTrue(not torch.allclose(expected, actual))
self.assertTrue(not torch.allclose(expected, actual_out))
actual = actual.sort()[0]
                        actual_out = actual_out.sort()[0]
self.assertTrue(torch.allclose(expected, actual))
self.assertTrue(torch.allclose(expected, actual_out))
def test_encrypt_decrypt(self):
key_size_bytes = 16
block_size_bytes = 16
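        # AES-128 operates on 16-byte blocks with a 16-byte key, hence both
        # constants above.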
def sizeof(dtype):
if dtype == torch.bool:
return 1
elif dtype.is_floating_point:
return torch.finfo(dtype).bits // 8
else:
return torch.iinfo(dtype).bits // 8
def pad(data, pad_size):
if len(data) % pad_size == 0:
return data
length = pad_size - (len(data) % pad_size)
return data + bytes([0]) * length
def create_aes(m, k):
if m == "ecb":
return AES.new(k.tobytes(), AES.MODE_ECB)
elif m == "ctr":
ctr = Counter.new(
AES.block_size * 8, initial_value=0, little_endian=True
)
return AES.new(k.tobytes(), AES.MODE_CTR, counter=ctr)
else:
return None
for key_dtype in self.all_dtypes:
key_size = key_size_bytes // sizeof(key_dtype)
key = torch.empty(key_size, dtype=key_dtype).random_()
key_np = to_bytes(key)
for initial_dtype in self.all_dtypes:
for initial_size in [0, 4, 8, 15, 16, 23, 42]:
initial = torch.empty(initial_size, dtype=initial_dtype).random_()
initial_np = to_bytes(initial)
initial_size_bytes = initial_size * sizeof(initial_dtype)
for encrypted_dtype in self.all_dtypes:
encrypted_size = (
(initial_size_bytes + block_size_bytes - 1)
// block_size_bytes
* block_size_bytes
// sizeof(encrypted_dtype)
)
encrypted = torch.zeros(encrypted_size, dtype=encrypted_dtype)
for decrypted_dtype in self.all_dtypes:
decrypted_size = (
initial_size_bytes + sizeof(decrypted_dtype) - 1
) // sizeof(decrypted_dtype)
decrypted = torch.zeros(
decrypted_size, dtype=decrypted_dtype
)
for mode in ["ecb", "ctr"]:
for device in self.all_devices:
key = key.to(device)
initial = initial.to(device)
encrypted = encrypted.to(device)
decrypted = decrypted.to(device)
csprng.encrypt(
initial, encrypted, key, "aes128", mode
)
encrypted_np = to_bytes(encrypted)
aes = create_aes(mode, key_np)
encrypted_expected = np.frombuffer(
aes.encrypt(
pad(initial_np.tobytes(), block_size_bytes)
),
dtype=np.int8,
)
self.assertTrue(
np.array_equal(encrypted_np, encrypted_expected)
)
csprng.decrypt(
encrypted, decrypted, key, "aes128", mode
)
decrypted_np = to_bytes(decrypted)[
:initial_size_bytes
]
aes = create_aes(mode, key_np)
decrypted_expected = np.frombuffer(
aes.decrypt(
pad(
encrypted_np.tobytes(), block_size_bytes
)
),
dtype=np.int8,
)[:initial_size_bytes]
self.assertTrue(
np.array_equal(decrypted_np, decrypted_expected)
)
self.assertTrue(
np.array_equal(initial_np, decrypted_np)
)
def test_encrypt_decrypt_inplace(self):
key_size_bytes = 16
def sizeof(dtype):
if dtype == torch.bool:
return 1
elif dtype.is_floating_point:
return torch.finfo(dtype).bits // 8
else:
return torch.iinfo(dtype).bits // 8
def create_aes(m, k):
if m == "ecb":
return AES.new(k.tobytes(), AES.MODE_ECB)
elif m == "ctr":
ctr = Counter.new(
AES.block_size * 8, initial_value=0, little_endian=True
)
return AES.new(k.tobytes(), AES.MODE_CTR, counter=ctr)
else:
return None
for key_dtype in self.all_dtypes:
key_size = key_size_bytes // sizeof(key_dtype)
key = torch.empty(key_size, dtype=key_dtype).random_()
key_np = to_bytes(key)
for initial_dtype in self.all_dtypes:
for initial_size_bytes in [0, 16, 256]:
initial_size = initial_size_bytes // sizeof(initial_dtype)
initial = torch.empty(initial_size, dtype=initial_dtype).random_()
initial_np = to_bytes(initial)
initial_np_copy = np.copy(initial_np)
for mode in ["ecb", "ctr"]:
for device in self.all_devices:
key = key.to(device)
initial = initial.to(device)
csprng.encrypt(initial, initial, key, "aes128", mode)
encrypted_np = to_bytes(initial)
aes = create_aes(mode, key_np)
encrypted_expected = np.frombuffer(
aes.encrypt(initial_np_copy.tobytes()), dtype=np.int8
)
self.assertTrue(
np.array_equal(encrypted_np, encrypted_expected)
)
encrypted_np_copy = np.copy(encrypted_np)
csprng.decrypt(initial, initial, key, "aes128", mode)
decrypted_np = to_bytes(initial)
aes = create_aes(mode, key_np)
decrypted_expected = np.frombuffer(
aes.decrypt(encrypted_np_copy.tobytes()), dtype=np.int8
)
self.assertTrue(
np.array_equal(decrypted_np, decrypted_expected)
)
self.assertTrue(
np.array_equal(initial_np_copy, decrypted_np)
)
if __name__ == "__main__":
unittest.main()
|
# Copyright (c) Meta Platforms, Inc. and affiliates. All Rights Reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
from torchcsprng._C import *
try:
from .version import __version__, git_version # noqa: F401
except ImportError:
pass
|
#!/usr/bin/env python
"""
TODO: This was hard to read in pkg_helpers.bash, so it has been extracted
into its own script. The script is not yet called by pkg_helpers.bash.
"""
import os
import sys
import json
import re
cuver = os.environ.get('CU_VERSION')
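# Convert e.g. 'cu101' to 'cuda10.1', the form that appears in the package
# filenames filtered below; 'cpu' passes through unchanged.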
cuver = (cuver[:-1] + '.' + cuver[-1]).replace('cu', 'cuda') if cuver != 'cpu' else cuver
pytorch_entries = json.load(sys.stdin)['pytorch']
filtered_pytorch_entries_plat_cuda = list(filter(
lambda x: (x['platform'] == 'darwin' or cuver in x['fn']), pytorch_entries
))
filtered_pytorch_entries_py_ver = list(filter(
lambda x: 'py' + os.environ['PYTHON_VERSION'] in x['fn'], filtered_pytorch_entries_plat_cuda
))
versions = [x['version'] for x in filtered_pytorch_entries_py_ver]
try:
last_entry = versions[-1]
    print(re.sub(r'\+.*$', '', last_entry))
except Exception as e:
all_platforms = set([x['platform'] for x in pytorch_entries])
all_fns = set([x['fn'] for x in pytorch_entries])
msg = "\n\t".join([
"Exception was: " + str(e),
"Unfiltered entries count: " + str(len(pytorch_entries)),
"Filtered by platform count: " + str(len(filtered_pytorch_entries_plat_cuda)),
"Filtered by python version count: " + str(len(filtered_pytorch_entries_py_ver)),
"all_platforms:\n" + "".join(map(lambda x: "\t\t" + str(x) + "\n", all_platforms)),
"all_fns:\n" + "".join(map(lambda x: "\t\t" + str(x) + "\n", all_fns)),
])
sys.exit(msg)
|
#!/usr/bin/env python3
import os.path
import unittest
import subprocess
import sys
import os
TIMEOUT = 2 * 60 * 60 # 2 hours
def run(command, timeout=None):
"""
Returns (return-code, stdout, stderr)
"""
completed = subprocess.run(command, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True,
encoding="utf8", timeout=timeout)
return completed.returncode, completed.stdout, completed.stderr
class TestRepos(unittest.TestCase):
pass
def _test(cls, directory):
command = os.path.join(directory, "run.sh")
(rc, out, err) = run(command, TIMEOUT)
cls.assertEqual(rc, 0, "Ran {}\nstdout:\n{}\nstderr:\n{}".format(
command, out, err))
def generate_test_objects(target_directory):
"""
Generate the tests, one for each repo
"""
    repos = sorted(
        os.path.normpath(os.path.join(target_directory, o))
        for o in os.listdir(target_directory)
        if os.path.isdir(os.path.join(target_directory, o))
    )
for f in repos:
print("found {}".format(f))
setattr(TestRepos, "test_" + f, lambda cls, f=f: _test(cls, f))
if __name__ == '__main__':
generate_test_objects('examples')
unittest.main()
|
import re
import subprocess32
import sys
PY3 = sys.version_info >= (3, 0)
reinforce_cmd = 'python examples/reinforcement_learning/reinforce.py'
actor_critic_cmd = 'python examples/reinforcement_learning/actor_critic.py'
def run(command, timeout):
"""
Returns (return-code, stdout, stderr)
"""
p = subprocess32.Popen(command, stdout=subprocess32.PIPE, stderr=subprocess32.PIPE, shell=True)
output, err = p.communicate(timeout=timeout)
rc = p.returncode
if PY3:
output = output.decode("ascii")
err = err.decode("ascii")
return (rc, output, err)
def check_cartpole_example(command, seconds=30, baseline_iter=1000):
"""
Runs command. Checks that:
1. the command exits within a timeout
2. cartpole is solved
3. the number of iters it takes to solve cartpole is less than baseline_iter
"""
(rc, stdout, stderr) = run(command, timeout=seconds)
print("stdout:\n", stdout)
print("stderr:\n", stderr)
    if rc != 0:
sys.exit(rc)
# Reinforce should have solved cartpole
matches = re.search('Solved!', stdout)
if matches is None:
print("error: reinforce didn't solve cartpole")
sys.exit(1)
    matches = re.findall(r'Episode (\d+)', stdout)
    if len(matches) == 0:
print("error: unexpected output: ", stdout)
sys.exit(1)
    episodes = [int(m) for m in matches]
    if episodes[-1] > baseline_iter:
        print("error: too many iterations taken: {}".format(episodes[-1]))
sys.exit(1)
if __name__ == '__main__':
check_cartpole_example(actor_critic_cmd, seconds=5*60, baseline_iter=4000)
# NOTE: Times out after 60 seconds; changed to 3 minutes
check_cartpole_example(reinforce_cmd, seconds=3*60, baseline_iter=4000)
|
import re
import subprocess
import sys
import os
PY3 = sys.version_info >= (3, 0)
def run(command, timeout):
"""
Returns (return-code, stdout, stderr)
"""
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, err = p.communicate(timeout=timeout)
rc = p.returncode
if PY3:
output = output.decode("ascii")
err = err.decode("ascii")
return rc, output, err
# data lives in $BASEDIR/cocotrain2014/
command_args = [
'python',
'examples/fast_neural_style/neural_style/neural_style.py',
'train',
'--dataset',
'cocotrain2014',
'--style-image',
'examples/fast_neural_style/images/style-images/mosaic.jpg',
'--save-model-dir',
'./saved_models',
'--epochs',
'1',
'--image-size=128',
'--cuda',
'0' if os.environ.get("CU_VERSION") == 'cpu' else '1',
]
command = " ".join(command_args)
def main():
# Test: run one epoch of fast neural style training. Warning: takes a while (half an hour?)
    # subprocess.check_output returns only stdout; use the run() helper above
    # to capture the return code and stderr as well.
    (rc, stdout, err) = run(command, timeout=None)
    print("stdout:\n", stdout, "stderr:\n", err)
    if rc != 0:
        sys.exit(rc)
    # Process the output for losses
    matches = re.findall(r'total: (\d+\.\d*)', stdout)
    if len(matches) == 0:
        print("error: unexpected output:", stdout)
        sys.exit(1)
losses = [float(m) for m in matches]
# Smoke test: assert losses are decreasing
prev = float('Inf')
for loss in losses:
        if loss > prev:
            print("error: non-decreasing loss:", losses)
            sys.exit(1)
        prev = loss
if __name__ == '__main__':
main()
|
#!/usr/bin/env python
import zipfile
import re
import sys
def unzip(path):
"""
Unzips /path/to/some.zip to ./some
    Does not work if 'some' contains characters other than
    letters, digits, and underscores (e.g. '-')
    """
    match = re.search(r"(\w+)\.zip", path)
if match is None:
print("Could not parse path")
return
dest = match.group(1)
with zipfile.ZipFile(path) as z:
z.extractall(dest)
if __name__ == '__main__':
unzip(sys.argv[1])
|
import re
import subprocess
import sys
import argparse
PY3 = sys.version_info >= (3, 0)
blacklist = [
"./advanced_source/super_resolution_with_caffe2.py",
# The docker image's python has some trouble with decoding unicode
"./intermediate_source/char_rnn_classification_tutorial.py",
]
visual = [
"./advanced_source/neural_style_tutorial.py",
"./beginner_source/blitz/cifar10_tutorial.py",
"./beginner_source/data_loading_tutorial.py",
"./beginner_source/transfer_learning_tutorial.py",
"./intermediate_source/char_rnn_generation_tutorial.py",
"./intermediate_source/reinforcement_q_learning.py",
"./intermediate_source/seq2seq_translation_tutorial.py",
"./intermediate_source/spatial_transformer_tutorial.py",
]
def run(command):
"""
Returns (return-code, stdout, stderr)
"""
p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
output, err = p.communicate()
rc = p.returncode
if PY3:
output = output.decode("ascii")
err = err.decode("ascii")
return (rc, output, err)
def main():
parser = argparse.ArgumentParser(
description="Run all pytorch tutorials")
parser.add_argument('--visual', dest='visual',
action='store_true',
default=False,
help='Run the tutorials that rely on a GUI. Default: False')
parser.add_argument('--py', dest='python',
action='store',
default='python',
help='the python binary. Default: python')
parser.add_argument('--all', dest='all',
action='store_true',
default=False,
help='Run all tutorials, include visual and blacklisted ones.')
args = parser.parse_args()
run_visual = args.visual
(rc, stdout, stderr) = run("find . -type f | grep -P 'source.+py$'")
    if rc != 0:
print("Couldn't execute find")
exit(1)
files = stdout.split('\n')
files = [f for f in files if len(f) > 0]
failed = []
warns = []
python = args.python
for f in files:
if not args.all and f in blacklist:
print("skipping {}".format(f))
continue
if not args.all and not run_visual and f in visual:
print("skipping {} b/c --visual was not set".format(f))
continue
(rc, out, err) = run("{} {}".format(python, f))
fail_msg = ""
        if rc != 0:
            failed.append((rc, out, err, f))
            fail_msg = " [FAILED]"
        if rc == 0 and len(err) != 0:
warns.append((rc, out, err, f))
fail_msg = " [WARNINGS]"
print("testing {}{}".format(f, fail_msg))
    if len(failed) == 0 and len(warns) == 0:
print("All tutorials ran successfully")
exit(0)
for (rc, out, err, f) in warns:
print("-" * 50)
print("[WARNINGS] {} {} had warnings:".format(python, f))
print("return code: {}\nstdout:\n{}\nstderr:\n{}\n".format(
rc, out, err))
    if len(failed) == 0:
exit(0)
for (rc, out, err, f) in failed:
print("-" * 50)
print("[FAILED] {} {} failed with the following:".format(python, f))
print("return code: {}\nstdout:\n{}\nstderr:\n{}\n".format(
rc, out, err))
exit(1)
if __name__ == '__main__':
main()
|
# Logic copied from PEP 513
def is_manylinux1_compatible():
# Only Linux, and only x86-64 / i686
from distutils.util import get_platform
if get_platform() not in ["linux-x86_64", "linux-i686"]:
return False
# Check for presence of _manylinux module
try:
import _manylinux
return bool(_manylinux.manylinux1_compatible)
except (ImportError, AttributeError):
# Fall through to heuristic check below
pass
# Check glibc version. CentOS 5 uses glibc 2.5.
return have_compatible_glibc(2, 5)
def have_compatible_glibc(major, minimum_minor):
import ctypes
process_namespace = ctypes.CDLL(None)
try:
gnu_get_libc_version = process_namespace.gnu_get_libc_version
except AttributeError:
# Symbol doesn't exist -> therefore, we are not linked to
# glibc.
return False
# Call gnu_get_libc_version, which returns a string like "2.5".
gnu_get_libc_version.restype = ctypes.c_char_p
version_str = gnu_get_libc_version()
# py2 / py3 compatibility:
if not isinstance(version_str, str):
version_str = version_str.decode("ascii")
# Parse string and check against requested version.
version = [int(piece) for piece in version_str.split(".")]
assert len(version) == 2
if major != version[0]:
return False
if minimum_minor > version[1]:
return False
return True
import sys
if is_manylinux1_compatible():
print("%s is manylinux1 compatible" % (sys.executable,))
sys.exit(0)
else:
print("%s is NOT manylinux1 compatible" % (sys.executable,))
sys.exit(1)
|
# cf. https://github.com/pypa/manylinux/issues/53
GOOD_SSL = "https://google.com"
BAD_SSL = "https://self-signed.badssl.com"
import sys
print("Testing SSL certificate checking for Python:", sys.version)
if (sys.version_info[0] == 2 and sys.version_info[:2] < (2, 7)
        or sys.version_info[0] == 3 and sys.version_info[:2] < (3, 4)):
print("This version never checks SSL certs; skipping tests")
sys.exit(0)
if sys.version_info[0] >= 3:
from urllib.request import urlopen
EXC = OSError
else:
from urllib import urlopen
EXC = IOError
print("Connecting to %s should work" % (GOOD_SSL,))
urlopen(GOOD_SSL)
print("...it did, yay.")
print("Connecting to %s should fail" % (BAD_SSL,))
try:
urlopen(BAD_SSL)
# If we get here then we failed:
print("...it DIDN'T!!!!!11!!1one!")
sys.exit(1)
except EXC:
print("...it did, yay.") |
# Utility script to print the python tag + the abi tag for a Python installation
# See PEP 425 for exactly what these are, but an example would be:
# cp27-cp27mu
from wheel.pep425tags import get_abbr_impl, get_impl_ver, get_abi_tag
print("{0}{1}-{2}".format(get_abbr_impl(), get_impl_ver(), get_abi_tag())) |
import json
import sys
# Usage:
# write_json.py input_file output_file
# Reads a file of '<platform> <pkg_type> <py_ver> <cu_ver> <size>' lines into a json file
inputfile = sys.argv[1]
outputfile = sys.argv[2]
data = []
with open(inputfile, 'r') as infile:
for line in infile:
platform, pkg_type, py_ver, cu_ver, size = line.split()
data.append({
'os': platform,
'pkgType': pkg_type,
'pyVer': py_ver,
'cuVer': cu_ver,
'size': size,
})
with open(outputfile, 'w') as outfile:
json.dump(data, outfile)
|
import json
import sys
# Usage:
# parse_conda_json.py input_file output_file
# Reads the result of a `conda search --json` into lines of
# '<platform> conda <py_ver> <cu_ver> <size>'
inputfile = sys.argv[1]
outputfile = sys.argv[2]
data = []
with open(inputfile, 'rb') as jsonfile:
rawdata = json.load(jsonfile)
# conda search returns format {'pytorch-nightly': [{key:val}...]}
pkg_name = list(rawdata.keys())[0]
print('parse_conda_json.py:: Parsing package {}'.format(pkg_name))
# Loop through versions found, keeping only 'build', and size
# size is in bytes
for result in rawdata[pkg_name]:
        # N.B. platform is queried as 'linux-64' but stored as 'linux', and
        # queried as 'osx-64' but stored as 'darwin'
plat = 'linux' if 'linux' in result['platform'] else 'macos'
size = result['size']
# 'build' is of the form
# linux CUDA builds: 'py2.7_cuda8.0.61_cudnn7.1.2_0'
# linux CPU builds: 'py2.7_cpu_0'
# MacOS builds: 'py2.7_0'
build = result['build'].split('_')
print('parse_conda_json.py:: Size of {} conda {} is {}'.format(plat, build, size))
if plat == 'macos':
# We expect a format like 'py2.7_0'
assert len(build) == 2, "Unexpected MacOS build string {}".format(build)
else:
# We expect a format like:
# CUDA builds: 'py2.7_cuda8.0.61_cudnn7.1.2_0'
# CPU builds: 'py2.7_cpu_0'
assert len(build) in (3,4), "Unexpected Linux build string {}".format(build)
# Python versions are of form 'py#.#' , we discard the 'py'
py_ver = build[0][2:]
# CUDA versions are of the form 'cuda10.0.61', we replace 'cuda' with
# 'cu' and keep only the major and minor values
if build[1].startswith('cuda'):
cu_ver = build[1][4:].split('.')
assert len(cu_ver) == 3, "Unexpected cuda format {}".format(cu_ver)
cu_ver = 'cu' + ''.join((cu_ver[0], cu_ver[1]))
else:
cu_ver = 'cpu'
data.append((plat, py_ver, cu_ver, size))
# Write the sizes out in log_name format of conda_2.7_cu80
print("parse_conda_json.py:: Writing log_name format to {}".format(outputfile))
with open(outputfile, 'a') as outfile:
for plat, py_ver, cu_ver, size in data:
outfile.write("{} conda {} {} {}\n".format(plat, py_ver, cu_ver, size))
|
#!/usr/bin/env python3.7
from datetime import datetime, time
import json
import requests
import itertools
import sqlite3
import os
import sys
from typing import Callable, Dict, List, MutableSet, Optional, Sequence
def get_executor_price_rate(executor):
(etype, eclass) = executor['type'], executor['resource_class']
assert etype in ['machine', 'external', 'docker', 'macos', 'runner'], f'Unexpected type {etype}:{eclass}'
if etype == 'machine':
return {
'medium': 10,
'large': 20,
'xlarge': 100,
'2xlarge': 200,
'gpu.medium': 160,
'gpu.large': 320,
'gpu.small': 80,
'windows.medium': 40,
'windows.large': 120,
'windows.xlarge': 210,
'windows.2xlarge': 500,
'windows.gpu.nvidia.medium': 500,
'gpu.nvidia.small': 160,
'gpu.nvidia.medium': 240,
'gpu.nvidia.large': 1000,
}[eclass]
if etype == 'macos':
return {
'medium': 50,
'large': 100,
}[eclass]
if etype == 'docker':
return {
'small': 5,
'medium': 10,
'medium+': 15,
'large': 20,
'xlarge': 40,
'2xlarge': 80,
'2xlarge+': 100,
}[eclass]
if etype == 'runner' or etype == 'external':
return {
'pytorch/amd-gpu': 0,
}[eclass]
raise RuntimeError(f'Undefined executor {etype}:{eclass}')
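# Price of one CircleCI credit in dollars; fetch_status uses this to convert
# credit counts into costs.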
price_per_credit = 6e-4
def get_circleci_token() -> str:
token_file_path = os.path.join(os.getenv('HOME'), '.circleci_token')
token = os.getenv('CIRCLECI_TOKEN')
if token is not None: return token
if not os.path.exists(token_file_path):
        raise RuntimeError('Cannot get a CircleCI token from either the CIRCLECI_TOKEN environment variable or the ~/.circleci_token file')
with open(token_file_path) as f:
return f.read().strip()
def is_workflow_in_progress(workflow: Dict) -> bool:
return workflow['status'] in ['running', 'not_run', 'failing', 'on_hold']
class CircleCICache:
def __init__(self, token: Optional[str], db_name: str = 'circleci-cache.db') -> None:
file_folder = os.path.dirname(__file__)
self.url_prefix = 'https://circleci.com/api/v2'
self.session = requests.session()
self.headers = {
'Accept': 'application/json',
'Circle-Token': token,
} if token is not None else None
self.db = sqlite3.connect(os.path.join(file_folder, db_name))
self.db.execute('CREATE TABLE IF NOT EXISTS jobs(slug TEXT NOT NULL, job_id INTEGER NOT NULL, json TEXT NOT NULL);')
self.db.execute('CREATE TABLE IF NOT EXISTS artifacts(slug TEXT NOT NULL, job_id INTEGER NOT NULL, json TEXT NOT NULL);')
self.db.execute('CREATE UNIQUE INDEX IF NOT EXISTS jobs_key on jobs(slug, job_id);')
self.db.execute('CREATE TABLE IF NOT EXISTS workflows(id TEXT NOT NULL PRIMARY KEY, json TEXT NOT NULL);')
self.db.execute('CREATE TABLE IF NOT EXISTS pipeline_workflows(id TEXT NOT NULL PRIMARY KEY, json TEXT NOT NULL);')
self.db.execute('CREATE TABLE IF NOT EXISTS pipelines(id TEXT NOT NULL PRIMARY KEY, json TEXT NOT NULL, branch TEXT, revision TEXT);')
self.db.commit()
def is_offline(self) -> bool:
return self.headers is None
    def _get_paged_items_list(self, url: str, params: Optional[Dict] = None, item_count: Optional[int] = -1) -> List:
        # Use None instead of a mutable default argument: the dict is mutated below.
        if params is None:
            params = {}
        rc, token, run_once = [], None, False
def _should_quit():
nonlocal run_once, rc, token
if not run_once:
run_once = True
return False
if token is None: return True
if item_count is None: return True
return item_count >= 0 and len(rc) >= item_count
while not _should_quit():
if token is not None: params['page-token'] = token
r = self.session.get(url, params = params, headers = self.headers)
try:
j = r.json()
except json.JSONDecodeError:
print(f"Failed to decode {rc}", file=sys.stderr)
raise
if 'message' in j:
raise RuntimeError(f'Failed to get list from {url}: {j["message"]}')
token = j['next_page_token']
rc.extend(j['items'])
return rc
def get_pipelines(self, project: str = 'github/pytorch/pytorch',branch: Optional[str] = None, item_count: Optional[int] = None) -> List:
if self.is_offline():
c = self.db.cursor()
cmd = "SELECT json from pipelines"
if branch is not None:
cmd += f" WHERE branch='{branch}'"
if item_count is not None and item_count > 0:
cmd += f" LIMIT {item_count}"
c.execute(cmd)
return [json.loads(val[0]) for val in c.fetchall()]
rc = self._get_paged_items_list( f'{self.url_prefix}/project/{project}/pipeline', {'branch': branch} if branch is not None else {}, item_count)
for pipeline in rc:
vcs = pipeline['vcs']
pid, branch, revision, pser = pipeline['id'], vcs['branch'], vcs['revision'], json.dumps(pipeline)
self.db.execute("INSERT OR REPLACE INTO pipelines(id, branch, revision, json) VALUES (?, ?, ?, ?)", (pid, branch, revision, pser))
self.db.commit()
return rc
def get_pipeline_workflows(self, pipeline) -> List:
c = self.db.cursor()
c.execute("SELECT json FROM pipeline_workflows WHERE id=?", (pipeline,))
rc = c.fetchone()
if rc is not None:
rc = json.loads(rc[0])
if not any([is_workflow_in_progress(w) for w in rc]) or self.is_offline():
return rc
if self.is_offline():
return []
rc = self._get_paged_items_list(f'{self.url_prefix}/pipeline/{pipeline}/workflow')
self.db.execute("INSERT OR REPLACE INTO pipeline_workflows(id, json) VALUES (?, ?)", (pipeline, json.dumps(rc)))
self.db.commit()
return rc
def get_workflow_jobs(self, workflow, should_cache = True) -> List:
c = self.db.cursor()
c.execute("select json from workflows where id=?", (workflow,))
rc = c.fetchone()
if rc is not None:
return json.loads(rc[0])
if self.is_offline():
return []
rc = self._get_paged_items_list(f'{self.url_prefix}/workflow/{workflow}/job')
if should_cache:
self.db.execute("INSERT INTO workflows(id, json) VALUES (?, ?)", (workflow, json.dumps(rc)))
self.db.commit()
return rc
def get_job(self, project_slug, job_number) -> Dict:
c = self.db.cursor()
c.execute("select json from jobs where slug=? and job_id = ?", (project_slug, job_number))
rc = c.fetchone()
if rc is not None:
return json.loads(rc[0])
if self.is_offline():
return {}
r = self.session.get(f'{self.url_prefix}/project/{project_slug}/job/{job_number}', headers = self.headers)
try:
rc=r.json()
except json.JSONDecodeError:
print(f"Failed to decode {rc}", file=sys.stderr)
raise
self.db.execute("INSERT INTO jobs(slug,job_id, json) VALUES (?, ?, ?)", (project_slug, job_number, json.dumps(rc)))
self.db.commit()
return rc
def get_job_artifacts(self, project_slug, job_number) -> List[Dict]:
c = self.db.cursor()
c.execute("select json from artifacts where slug=? and job_id = ?", (project_slug, job_number))
rc = c.fetchone()
if rc is not None:
return json.loads(rc[0])
if self.is_offline():
            return []
rc = self._get_paged_items_list(f"{self.url_prefix}/project/{project_slug}/{job_number}/artifacts")
self.db.execute("INSERT INTO artifacts(slug,job_id, json) VALUES (?, ?, ?)", (project_slug, job_number, json.dumps(rc)))
self.db.commit()
return rc
def get_pipeline_jobs(self, project: str = 'github/pytorch/pytorch', branch: Optional[str] = None, item_count: Optional[int] = None) -> Sequence:
for pipeline in self.get_pipelines(project, branch, item_count):
for workflow in self.get_pipeline_workflows(pipeline['id']):
in_progress = is_workflow_in_progress(workflow)
for job in self.get_workflow_jobs(workflow['id'], should_cache = not in_progress):
yield (pipeline, workflow, job)
def get_jobs_summary(self, slug='gh/pytorch/pytorch', workflow='build') -> Dict:
r = requests.get(f'{self.url_prefix}/insights/{slug}/workflows/{workflow}/jobs', headers = self.headers)
rc = dict()
for item in r.json()['items']:
rc[item['name']] = item
return rc
def get_job_timeseries(self, job_name, slug='gh/pytorch/pytorch', workflow='build') -> List:
r = requests.get(f'{self.url_prefix}/insights/{slug}/workflows/build/jobs/{job_name}', headers = self.headers)
return [(datetime.fromisoformat(x['started_at'][:-1]), x['duration']) for x in r.json()['items'] if x['status'] == 'success']
def aggregate_by_day(series):
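    """
    Average a (timestamp, value) series per calendar day and return a list of
    (day, mean) pairs sorted by day.
    """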
rc = {}
for (ts, val) in series:
date = datetime.combine(ts.date(), time())
valcount = [val, 1.0]
if date not in rc:
rc[date] = valcount
else:
rc[date] = [sum(x) for x in zip(rc[date], valcount)]
return [(x, rc[x][0] / rc[x][1]) for x in sorted(rc.keys())]
def plot_graph(name_filter=None, output_file=None):
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
ci_cache = CircleCICache(token=get_circleci_token())
summary = ci_cache.get_jobs_summary()
test_jobs = [ name for name in summary.keys() if name.startswith('pytorch') and 'test' in name]
series = []
labels = []
styles = [f'{color}{style}' for (style,color) in itertools.product(['-','--','-.',':'], ['b','g','r','c','m','y','k'])]
for name in test_jobs:
label=f"{name}(p95 = {int(summary[name]['metrics']['duration_metrics']['p95']/60)} min)"
print(label)
if name_filter is not None and name_filter not in name:
continue
ts = ci_cache.get_job_timeseries(name)
if len(ts) == 0:
continue
labels.append(label)
series.append(ts)
x,y=zip(*aggregate_by_day(ts))
plt.plot(x, y, styles[len(labels)%len(styles)])
plt.legend(labels)
if output_file is not None:
plt.savefig(output_file)
else:
plt.show()
def print_line(line: str, padding: Optional[int] =None, newline: bool =True) -> None:
if padding is not None and len(line) < padding:
line += ' '*(padding - len(line))
print(line, end = '\n' if newline else '\r', flush=True)
def fetch_status(branch=None, item_count=50):
isatty = sys.stdout.isatty()
padding = os.get_terminal_size().columns -1 if isatty else None
ci_cache = CircleCICache(token=get_circleci_token())
print(f"About to fetch {item_count} latest pipelines against {branch if branch is not None else 'all branches'}")
pipelines = ci_cache.get_pipelines(branch=branch, item_count=item_count)
total_price, total_master_price = 0, 0
for pipeline in pipelines:
revision = pipeline['vcs']['revision']
branch = pipeline['vcs']['branch']
workflows = ci_cache.get_pipeline_workflows(pipeline['id'])
known_job_ids = []
for workflow in workflows:
url = f'https://app.circleci.com/pipelines/github/pytorch/pytorch/{workflow["pipeline_number"]}/workflows/{workflow["id"]}'
if is_workflow_in_progress(workflow):
print_line(f'Skipping {url} name:{workflow["name"]} status:{workflow["status"]}', newline=not sys.stdout.isatty())
continue
rerun=False
total_credits, test_credits, gpu_credits, wincpu_credits, wingpu_credits = 0, 0, 0, 0, 0
jobs = ci_cache.get_workflow_jobs(workflow['id'])
for job in jobs:
job_name, job_status, job_number = job['name'], job['status'], job.get('job_number', None)
if job_status in ['blocked', 'canceled', 'unauthorized', 'running', 'not_run', 'failing']: continue
if job_number is None:
print(job)
continue
if job_number in known_job_ids:
rerun = True
continue
job_info = ci_cache.get_job(job['project_slug'], job_number)
if 'executor' not in job_info:
print(f'executor not found in {job_info}')
continue
job_executor = job_info['executor']
resource_class = job_executor['resource_class']
if resource_class is None:
print(f'resource_class is none for {job_info}')
continue
job_on_gpu = 'gpu' in resource_class
job_on_win = 'windows' in resource_class
duration = datetime.fromisoformat(job_info['stopped_at'][:-1]) - datetime.fromisoformat(job_info['started_at'][:-1])
job_credits = get_executor_price_rate(job_executor) * int(job_info['duration']) * 1e-3 / 60
job_cost = job_credits * price_per_credit
total_credits += job_credits
if 'test' in job_name or job_name.startswith('smoke_'):
test_credits += job_credits
elif job_on_gpu:
print(f'Running build job {job_name} on GPU!!!')
if job_on_gpu:
gpu_credits += job_credits
if job_on_win: wingpu_credits += job_credits
if job_on_win and not job_on_gpu:
wincpu_credits += job_credits
known_job_ids.append(job_number)
print_line(f' {job_name} {job_status} {duration} ${job_cost:.2f}', padding = padding, newline = not isatty)
# Increment totals
total_price += total_credits * price_per_credit
if branch in ['master', 'nightly', 'postnightly', 'release/1.6']:
total_master_price += total_credits * price_per_credit
# skip small jobs
if total_credits * price_per_credit < .1: continue
workflow_status = f'{url} {workflow["name"]} status:{workflow["status"]} price: ${total_credits * price_per_credit:.2f}'
workflow_status += ' (Rerun?)' if rerun else ''
workflow_status += f'\n\t\tdate: {workflow["created_at"]} branch:{branch} revision:{revision}'
workflow_status += f'\n\t\ttotal credits: {int(total_credits)}'
if test_credits != 0:
workflow_status += f' testing: {100 * test_credits / total_credits:.1f}%'
if gpu_credits != 0:
workflow_status += f' GPU testing: {100 * gpu_credits / total_credits:.1f}%'
if wingpu_credits != 0:
workflow_status += f' WINGPU/GPU: {100 * wingpu_credits / gpu_credits:.1f}%'
if wincpu_credits != 0:
workflow_status += f' Win CPU: {100 * wincpu_credits / total_credits:.1f}%'
workflow_status += f' Total: ${total_price:.2f} master fraction: {100 * total_master_price/ total_price:.1f}%'
print_line(workflow_status, padding = padding)
def plot_heatmap(cov_matrix, names):
import numpy as np
import matplotlib.pyplot as plt
assert cov_matrix.shape == (len(names), len(names))
fig, ax = plt.subplots()
im = ax.imshow(cov_matrix)
ax.set_xticks(np.arange(len(names)))
ax.set_yticks(np.arange(len(names)))
ax.set_xticklabels(names)
ax.set_yticklabels(names)
    # Rotate tick labels
plt.setp(ax.get_xticklabels(), rotation=45, ha='right', rotation_mode='anchor')
# Annotate values
for i in range(len(names)):
for j in range(len(names)):
ax.text(j, i, f'{cov_matrix[i, j]:.2f}', ha = 'center', va = 'center', color = 'w')
plt.show()
def filter_service_jobs(name):
if name.startswith('docker'):
return True
if name.startswith('binary'):
return True
return False
def filter_cuda_test(name):
if filter_service_jobs(name):
return False
if 'libtorch' in name:
return False
if 'test' not in name:
return False
# Skip jit-profiling tests
if 'jit-profiling' in name:
return False
if 'cuda11' in name:
return False
# Skip VS2017 tests
if 'vs2017' in name:
return False
return 'cuda' in name and 'nogpu' not in name
def filter_cuda_build(name):
if filter_service_jobs(name):
return False
if 'libtorch' in name:
return False
return 'cuda' in name and name.endswith('build')
def filter_windows_test(name):
if filter_service_jobs(name):
return False
# Skip jit-profiling tests
if 'jit-profiling' in name:
return False
return 'test' in name and 'windows' in name
def compute_covariance(branch='master', name_filter: Optional[Callable[[str], bool]] = None):
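    """
    Encode each job's outcome per revision as +1 (success) or -1 (failure),
    skip pipelines that are fully green, and plot the correlation matrix of
    job results across revisions as a heatmap.
    """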
import numpy as np
revisions: MutableSet[str] = set()
job_summary: Dict[str, Dict[str, float]] = {}
# Extract data
print(f"Computing covariance for {branch if branch is not None else 'all branches'}")
ci_cache = CircleCICache(None)
pipelines = ci_cache.get_pipelines(branch = branch)
for pipeline in pipelines:
if pipeline['trigger']['type'] == 'schedule':
continue
revision = pipeline['vcs']['revision']
pipeline_jobs: Dict[str, float] = {}
blocked_jobs: MutableSet[str] = set()
workflows = ci_cache.get_pipeline_workflows(pipeline['id'])
for workflow in workflows:
if is_workflow_in_progress(workflow):
continue
jobs = ci_cache.get_workflow_jobs(workflow['id'])
for job in jobs:
job_name = job['name']
job_status = job['status']
# Handle renames
if job_name == 'pytorch_linux_xenial_cuda10_1_cudnn7_py3_NO_AVX2_test':
job_name = 'pytorch_linux_xenial_cuda10_1_cudnn7_py3_nogpu_NO_AVX2_test'
if job_name == 'pytorch_linux_xenial_cuda10_1_cudnn7_py3_NO_AVX_NO_AVX2_test':
job_name = 'pytorch_linux_xenial_cuda10_1_cudnn7_py3_nogpu_NO_AVX_test'
if job_status in ['infrastructure_fail', 'canceled']:
continue
if callable(name_filter) and not name_filter(job_name):
continue
if job_status == 'blocked':
blocked_jobs.add(job_name)
continue
if job_name in blocked_jobs:
blocked_jobs.remove(job_name)
result = 1.0 if job_status == 'success' else -1.0
pipeline_jobs[job_name] = result
# Skip build with blocked job [which usually means build failed due to the test failure]
if len(blocked_jobs) != 0:
continue
# Skip all success workflows
if all([result == 1.0 for result in pipeline_jobs.values()]):
continue
revisions.add(revision)
for job_name in pipeline_jobs:
if job_name not in job_summary:
job_summary[job_name] = {}
job_summary[job_name][revision] = pipeline_jobs[job_name]
# Analyze results
job_names = sorted(job_summary.keys())
#revisions = sorted(revisions)
    job_data = np.zeros((len(job_names), len(revisions)), dtype=float)
print(f"Number of observations: {len(revisions)}")
for job_idx, job_name in enumerate(job_names):
job_row = job_summary[job_name]
for rev_idx, revision in enumerate(revisions):
if revision in job_row:
job_data[job_idx, rev_idx] = job_row[revision]
success_rate = job_data[job_idx,].sum(where=job_data[job_idx,]>0.0) / len(job_row)
present_rate = 1.0 * len(job_row) / len(revisions)
print(f"{job_name}: missing {100.0 * (1.0 - present_rate):.2f}% success rate: {100 * success_rate:.2f}%")
cov_matrix = np.corrcoef(job_data)
plot_heatmap(cov_matrix, job_names)
def print_artifacts(branch, item_count, name_filter: Callable[[str], bool]) -> None:
ci_cache = CircleCICache(token=get_circleci_token())
for pipeline, workflow, job in ci_cache.get_pipeline_jobs(branch=branch, item_count = item_count):
revision = pipeline['vcs']['revision']
if not name_filter(job["name"]):
continue
job_number = job.get("job_number")
if job_number is None:
continue
artifacts = ci_cache.get_job_artifacts('gh/pytorch/pytorch', job_number)
for artifact in artifacts:
name = os.path.basename(artifact['path'])
url = artifact["url"]
print(f"{revision} {name} {url}")
def parse_arguments():
from argparse import ArgumentParser
parser = ArgumentParser(description="Download and analyze circle logs")
parser.add_argument('--plot-graph', type=str, nargs = '?', help="Plot job time trends", const = '')
parser.add_argument('--output', type=str, help="Output file name for the graphs")
parser.add_argument('--get_artifacts', type=str)
parser.add_argument('--branch', type=str)
parser.add_argument('--item_count', type=int, default=100)
parser.add_argument('--compute_covariance', choices=['cuda_test', 'cuda_build', 'windows_test'])
return parser.parse_args()
if __name__ == '__main__':
args = parse_arguments()
if args.get_artifacts is not None:
print_artifacts(branch=args.branch,
item_count=args.item_count,
name_filter=lambda x: args.get_artifacts in x)
sys.exit(0)
if args.compute_covariance is not None:
name_filter = {
'cuda_test': filter_cuda_test,
'cuda_build': filter_cuda_build,
'windows_test': filter_windows_test,
}[args.compute_covariance]
compute_covariance(branch=args.branch, name_filter=name_filter)
sys.exit(0)
if args.plot_graph is not None:
plot_graph(args.plot_graph, args.output)
sys.exit(0)
fetch_status(branch=args.branch, item_count=args.item_count)
|
#!/usr/bin/env python3
# Tool for analyzing sizes of CUDA kernels for various GPU architectures
import os
import struct
import sys
# Try to auto-import elftools
try:
from elftools.elf.elffile import ELFFile
except ModuleNotFoundError:
    print('elftools module not found, trying to install it from pip')
from pip._internal import main as pip_main
try:
pip_main(["install", "pyelftools", "--user"])
except SystemExit:
print(f'PIP installation failed, please install it manually by invoking "{sys.executable} -mpip install pyelftools --user"')
sys.exit(-1)
from elftools.elf.elffile import ELFFile
# From https://stackoverflow.com/questions/1094841/reusable-library-to-get-human-readable-version-of-file-size
def sizeof_fmt(num, suffix='B'):
for unit in ['','Ki','Mi','Gi','Ti','Pi','Ei','Zi']:
if abs(num) < 1024.0:
return "%3.1f%s%s" % (num, unit, suffix)
num /= 1024.0
return "%.1f%s%s" % (num, 'Yi', suffix)
def compute_cubin_sizes(file_name, section_name='.nv_fatbin', debug=False):
with open(file_name, 'rb') as f:
        elf_file = ELFFile(f)
        nv_fatbin = elf_file.get_section_by_name(section_name)
        if nv_fatbin is None:
            return {}
        data = nv_fatbin.data()
idx, offs = 0, 0
elf_sizes = {}
while offs < len(data):
            (magic, version, header_size, fatbin_size) = struct.unpack('IHHL', data[offs: offs + 16])
if magic != 0xba55ed50 or version != 1:
raise RuntimeError(f"Unexpected fatbin magic {hex(magic)} or version {version}")
if debug:
print(f"Found fatbin at {offs} header_size={header_size} fatbin_size={fatbin_size}")
offs += header_size
fatbin_end = offs + fatbin_size
while offs < fatbin_end:
(kind, version, hdr_size, elf_size, empty, code_ver, sm_ver) = struct.unpack('HHILLIH', data[offs: offs + 30])
if version != 0x0101 or kind not in [1, 2]:
raise RuntimeError(f"Unexpected cubin version {hex(version)} or kind {kind}")
sm_ver = f'{"ptx" if kind == 1 else "sm"}_{sm_ver}'
if debug:
print(f" {idx}: elf_size={elf_size} code_ver={hex(code_ver)} sm={sm_ver}")
                if sm_ver not in elf_sizes:
                    elf_sizes[sm_ver] = 0
                elf_sizes[sm_ver] += elf_size
idx, offs = idx + 1, offs + hdr_size + elf_size
offs = fatbin_end
return elf_sizes
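# Example usage (hypothetical path, assuming a CUDA build of libtorch is present):
#   sizes = compute_cubin_sizes('/path/to/libtorch_cuda.so')
#   for sm, size in sizes.items():
#       print(sm, sizeof_fmt(size))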
def main():
if sys.platform != 'linux':
print('This script only works with Linux ELF files')
return
if len(sys.argv) < 2:
print(f"{sys.argv[0]} invoked without any arguments trying to infer location of libtorch_cuda")
import torch
fname = os.path.join(os.path.dirname(torch.__file__), 'lib', 'libtorch_cuda.so')
else:
fname = sys.argv[1]
if not os.path.exists(fname):
print(f"Can't find {fname}")
sys.exit(-1)
print(f"Analyzing {fname}")
for section_name in ['.nv_fatbin', '__nv_relfatbin']:
elf_sizes = compute_cubin_sizes(fname, section_name)
print(f"{section_name} size {sizeof_fmt(sum(elf_sizes.values()))}")
for (sm_ver, total_size) in elf_sizes.items():
print(f" {sm_ver}: {sizeof_fmt(total_size)}")
if __name__ == '__main__':
main()
|
from collections import defaultdict
from datetime import datetime, timedelta, timezone
import gzip
import multiprocessing
import os
import re
import urllib
from tqdm import tqdm
import botocore
import boto3
S3 = boto3.resource('s3')
CLIENT = boto3.client('s3')
BUCKET = S3.Bucket('pytorch')
class CacheEntry:
_size = None
def __init__(self, download_uri: str):
self.download_uri = download_uri
self.bytes_sent = 0
@property
def os_type(self) -> str:
os_type = "linux"
if "win" in self.download_uri:
os_type = "windows"
elif "macosx" in self.download_uri:
os_type = "macos"
return os_type
@property
def target_arch(self) -> str:
target_arch = "cpu"
result = re.search(r"cu[0-9]+", self.download_uri)
if result:
target_arch = result[0]
return target_arch
@property
def package_name(self) -> str:
filename_contents = os.path.basename(self.download_uri).split('-')
return filename_contents[0]
@property
def package_version(self) -> str:
if "dev" in self.download_uri:
results = re.search(
r"[0-9]+\.[0-9]+\.[0-9]+\.dev[0-9]+",
self.download_uri
)
else:
results = re.search(
r"[0-9]+\.[0-9]+\.[0-9]+", self.download_uri
)
if not results:
raise Exception("Wtf there's no version o.O")
return results[0]
@property
def size(self) -> int:
if self._size is None:
for key in BUCKET.objects.filter(
Prefix=self.download_uri.lstrip("/")
):
self._size = key.size
if self._size is None:
raise Exception(
f"No object found for prefix {self.download_uri}"
)
return self._size
@property
def downloads(self):
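        # Approximate the download count by dividing the total bytes served by the
        # artifact size (partial transfers are floored away by the integer division).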
return self.bytes_sent // self.size
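# parse_logs assumes the standard CloudFront access-log layout: tab-separated rows
# preceded by two header lines, with bytes sent in column 3, the (double URL-encoded)
# request URI in column 7 and the HTTP status in column 8 (all zero-indexed).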
def parse_logs(log_directory: str) -> dict:
bytes_cache = dict()
entries = []
for (dirpath, _, filenames) in os.walk(log_directory):
for filename in tqdm(filenames):
with gzip.open(os.path.join(dirpath, filename), 'r') as gf:
string = gf.read().decode("utf-8")
entries += string.splitlines()[2:]
for entry in entries:
columns = entry.split('\t')
bytes_sent = int(columns[3])
download_uri = urllib.parse.unquote(
urllib.parse.unquote(columns[7])
)
status = columns[8]
if not all([
status.startswith("2"),
download_uri.endswith((".whl", ".zip"))
]):
continue
if not bytes_cache.get(download_uri):
bytes_cache[download_uri] = CacheEntry(download_uri)
bytes_cache[download_uri].bytes_sent += bytes_sent
return bytes_cache
def output_results(bytes_cache: dict) -> None:
os_results = defaultdict(int)
arch_results = defaultdict(int)
package_results = defaultdict(lambda: defaultdict(int))
for _, val in tqdm(bytes_cache.items()):
try:
os_results[val.os_type] += val.downloads
arch_results[val.target_arch] += val.downloads
package_results[val.package_name][val.package_version] += (
val.downloads
)
except Exception:
pass
print("=-=-= Results =-=-=")
print("=-=-= OS =-=-=")
total_os_num = sum(os_results.values())
for os_type, num in os_results.items():
print(
f"\t* {os_type}: {num} ({(num/total_os_num)*100:.2f}%)"
)
print("=-=-= ARCH =-=-=")
total_arch_num = sum(arch_results.values())
for arch_type, num in arch_results.items():
print(
f"\t* {arch_type}: {num} ({(num/total_arch_num) * 100:.2f}%)"
)
print("=-=-= By Package =-=-=")
for package_name, upper_val in package_results.items():
print(f"=-=-= {package_name} =-=-=")
total_package_num = sum(upper_val.values())
for package_version, num in upper_val.items():
print(
f"\t* {package_version}: {num} ({(num/total_package_num) * 100:.2f}%)"
)
def download_logs(log_directory: str):
dt_now = datetime.now(timezone.utc)
dt_end = datetime(dt_now.year, dt_now.month, dt_now.day, tzinfo=timezone.utc)
dt_start = dt_end - timedelta(days=1, hours=1) # Add 1 hour padding to account for potentially missed logs due to timing
for key in tqdm(BUCKET.objects.filter(Prefix='cflogs')):
remote_fname = key.key
local_fname = os.path.join(log_directory, remote_fname)
# Only download things from yesterday
dt_modified = key.last_modified.replace(tzinfo=timezone.utc)
if dt_start >= dt_modified or dt_end < dt_modified:
continue
# TODO: Do this in parallel
if not os.path.exists(local_fname):
dirname = os.path.dirname(local_fname)
if not os.path.exists(dirname):
os.makedirs(dirname)
CLIENT.download_file("pytorch", remote_fname, local_fname)
if __name__ == "__main__":
print("Downloading logs")
    download_logs('cache')
print("Parsing logs")
cache = parse_logs('cache/cflogs/')
print("Calculating results")
output_results(cache)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
from pathlib import Path
from setuptools import setup
PKG_NAME = "python-doctr"
VERSION = os.getenv("BUILD_VERSION", "0.7.1a0")
if __name__ == "__main__":
print(f"Building wheel {PKG_NAME}-{VERSION}")
# Dynamically set the __version__ attribute
cwd = Path(__file__).parent.absolute()
with open(cwd.joinpath("doctr", "version.py"), "w", encoding="utf-8") as f:
f.write(f"__version__ = '{VERSION}'\n")
setup(name=PKG_NAME, version=VERSION)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import cv2
import matplotlib.pyplot as plt
import numpy as np
import streamlit as st
from doctr.file_utils import is_tf_available
from doctr.io import DocumentFile
from doctr.utils.visualization import visualize_page
if is_tf_available():
import tensorflow as tf
from backend.tensorflow import DET_ARCHS, RECO_ARCHS, forward_image, load_predictor
    if any(tf.config.experimental.list_physical_devices("GPU")):
forward_device = tf.device("/gpu:0")
else:
forward_device = tf.device("/cpu:0")
else:
import torch
from backend.pytorch import DET_ARCHS, RECO_ARCHS, forward_image, load_predictor
forward_device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def main(det_archs, reco_archs):
"""Build a streamlit layout"""
# Wide mode
st.set_page_config(layout="wide")
# Designing the interface
st.title("docTR: Document Text Recognition")
# For newline
st.write("\n")
# Instructions
st.markdown("*Hint: click on the top-right corner of an image to enlarge it!*")
# Set the columns
cols = st.columns((1, 1, 1, 1))
cols[0].subheader("Input page")
cols[1].subheader("Segmentation heatmap")
cols[2].subheader("OCR output")
cols[3].subheader("Page reconstitution")
# Sidebar
# File selection
st.sidebar.title("Document selection")
# Disabling warning
st.set_option("deprecation.showfileUploaderEncoding", False)
# Choose your own image
uploaded_file = st.sidebar.file_uploader("Upload files", type=["pdf", "png", "jpeg", "jpg"])
if uploaded_file is not None:
if uploaded_file.name.endswith(".pdf"):
doc = DocumentFile.from_pdf(uploaded_file.read())
else:
doc = DocumentFile.from_images(uploaded_file.read())
page_idx = st.sidebar.selectbox("Page selection", [idx + 1 for idx in range(len(doc))]) - 1
page = doc[page_idx]
cols[0].image(page)
# Model selection
st.sidebar.title("Model selection")
st.sidebar.markdown("**Backend**: " + ("TensorFlow" if is_tf_available() else "PyTorch"))
det_arch = st.sidebar.selectbox("Text detection model", det_archs)
reco_arch = st.sidebar.selectbox("Text recognition model", reco_archs)
# For newline
st.sidebar.write("\n")
if st.sidebar.button("Analyze page"):
if uploaded_file is None:
st.sidebar.write("Please upload a document")
else:
with st.spinner("Loading model..."):
predictor = load_predictor(det_arch, reco_arch, forward_device)
with st.spinner("Analyzing..."):
# Forward the image to the model
seg_map = forward_image(predictor, page, forward_device)
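                    # Drop singleton batch/channel dims and resize the heatmap back to
                    # the page resolution so it matches the displayed input page.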
seg_map = np.squeeze(seg_map)
seg_map = cv2.resize(seg_map, (page.shape[1], page.shape[0]), interpolation=cv2.INTER_LINEAR)
# Plot the raw heatmap
fig, ax = plt.subplots()
ax.imshow(seg_map)
ax.axis("off")
cols[1].pyplot(fig)
# Plot OCR output
out = predictor([page])
fig = visualize_page(out.pages[0].export(), page, interactive=False)
cols[2].pyplot(fig)
                    # Page reconstitution under input page
page_export = out.pages[0].export()
if "rotation" not in det_arch:
img = out.pages[0].synthesize()
cols[3].image(img, clamp=True)
# Display JSON
st.markdown("\nHere are your analysis results in JSON format:")
st.json(page_export)
if __name__ == "__main__":
main(DET_ARCHS, RECO_ARCHS)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import numpy as np
import tensorflow as tf
from doctr.models import ocr_predictor
from doctr.models.predictor import OCRPredictor
DET_ARCHS = ["db_resnet50", "db_mobilenet_v3_large", "linknet_resnet18_rotation"]
RECO_ARCHS = ["crnn_vgg16_bn", "crnn_mobilenet_v3_small", "master", "sar_resnet31"]
def load_predictor(det_arch: str, reco_arch: str, device) -> OCRPredictor:
"""
Args:
device is tf.device
"""
with device:
predictor = ocr_predictor(
det_arch, reco_arch, pretrained=True, assume_straight_pages=("rotation" not in det_arch)
)
return predictor
def forward_image(predictor: OCRPredictor, image: np.ndarray, device) -> np.ndarray:
"""
Args:
device is tf.device
"""
with device:
processed_batches = predictor.det_predictor.pre_processor([image])
out = predictor.det_predictor.model(processed_batches[0], return_model_output=True)
seg_map = out["out_map"]
with tf.device("/cpu:0"):
seg_map = tf.identity(seg_map).numpy()
return seg_map
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import numpy as np
import torch
from doctr.models import ocr_predictor
from doctr.models.predictor import OCRPredictor
DET_ARCHS = ["db_resnet50", "db_mobilenet_v3_large", "linknet_resnet50_rotation"]
RECO_ARCHS = ["crnn_vgg16_bn", "crnn_mobilenet_v3_small", "master", "sar_resnet31"]
def load_predictor(det_arch: str, reco_arch: str, device) -> OCRPredictor:
"""
Args:
device is torch.device
"""
predictor = ocr_predictor(
det_arch, reco_arch, pretrained=True, assume_straight_pages=("rotation" not in det_arch)
).to(device)
return predictor
def forward_image(predictor: OCRPredictor, image: np.ndarray, device) -> np.ndarray:
"""
Args:
device is torch.device
"""
with torch.no_grad():
processed_batches = predictor.det_predictor.pre_processor([image])
out = predictor.det_predictor.model(processed_batches[0].to(device), return_model_output=True)
seg_map = out["out_map"].to("cpu").numpy()
return seg_map
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
"""
Image classification latency benchmark
"""
import argparse
import os
import time
os.environ["USE_TF"] = "1"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import numpy as np
import tensorflow as tf
from doctr.models import classification
def main(args):
if args.gpu:
gpu_devices = tf.config.experimental.list_physical_devices("GPU")
if any(gpu_devices):
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
else:
raise AssertionError("TensorFlow cannot access your GPU. Please investigate!")
else:
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# Pretrained imagenet model
model = classification.__dict__[args.arch](
pretrained=args.pretrained,
input_shape=(args.size, args.size, 3),
)
# Input
img_tensor = tf.random.uniform(shape=[args.batch_size, args.size, args.size, 3], maxval=1, dtype=tf.float32)
# Warmup
for _ in range(10):
_ = model(img_tensor, training=False)
timings = []
# Evaluation runs
for _ in range(args.it):
start_ts = time.perf_counter()
_ = model(img_tensor, training=False)
timings.append(time.perf_counter() - start_ts)
_timings = np.array(timings)
print(f"{args.arch} ({args.it} runs on ({args.size}, {args.size}) inputs)")
print(f"mean {1000 * _timings.mean():.2f}ms, std {1000 * _timings.std():.2f}ms")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="docTR latency benchmark for imag classification (TensorFlow)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("arch", type=str, help="Architecture to use")
parser.add_argument("--size", type=int, default=32, help="The image input size")
parser.add_argument("--batch-size", "-b", type=int, default=64, help="The batch_size")
parser.add_argument("--gpu", dest="gpu", help="Should the benchmark be performed on GPU", action="store_true")
parser.add_argument("--it", type=int, default=100, help="Number of iterations to run")
parser.add_argument(
"--pretrained", dest="pretrained", help="Use pre-trained models from the modelzoo", action="store_true"
)
args = parser.parse_args()
main(args)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
"""
Image classification latency benchmark
"""
import argparse
import os
import time
import numpy as np
import torch
os.environ["USE_TORCH"] = "1"
from doctr.models import classification
@torch.no_grad()
def main(args):
device = torch.device("cuda:0" if args.gpu else "cpu")
# Pretrained imagenet model
model = (
classification.__dict__[args.arch](
pretrained=args.pretrained,
)
.eval()
.to(device=device)
)
# Input
img_tensor = torch.rand((args.batch_size, 3, args.size, args.size)).to(device=device)
# Warmup
for _ in range(10):
_ = model(img_tensor)
timings = []
# Evaluation runs
for _ in range(args.it):
start_ts = time.perf_counter()
_ = model(img_tensor)
timings.append(time.perf_counter() - start_ts)
_timings = np.array(timings)
print(f"{args.arch} ({args.it} runs on ({args.size}, {args.size}) inputs in batches of {args.batch_size})")
print(f"mean {1000 * _timings.mean():.2f}ms, std {1000 * _timings.std():.2f}ms")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="docTR latency benchmark for image classification (PyTorch)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("arch", type=str, help="Architecture to use")
parser.add_argument("--size", type=int, default=32, help="The image input size")
parser.add_argument("--batch-size", "-b", type=int, default=64, help="The batch_size")
parser.add_argument("--gpu", dest="gpu", help="Should the benchmark be performed on GPU", action="store_true")
parser.add_argument("--it", type=int, default=100, help="Number of iterations to run")
parser.add_argument(
"--pretrained", dest="pretrained", help="Use pre-trained models from the modelzoo", action="store_true"
)
args = parser.parse_args()
main(args)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
os.environ["USE_TF"] = "1"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import datetime
import multiprocessing as mp
import time
import numpy as np
import tensorflow as tf
import wandb
from fastprogress.fastprogress import master_bar, progress_bar
from tensorflow.keras import mixed_precision
from doctr.models import login_to_hub, push_to_hf_hub
gpu_devices = tf.config.experimental.list_physical_devices("GPU")
if any(gpu_devices):
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
from doctr import transforms as T
from doctr.datasets import VOCABS, CharacterGenerator, DataLoader
from doctr.models import classification
from doctr.models.utils import export_model_to_onnx
from utils import plot_recorder, plot_samples
def record_lr(
model: tf.keras.Model,
train_loader: DataLoader,
batch_transforms,
optimizer,
start_lr: float = 1e-7,
end_lr: float = 1,
num_it: int = 100,
amp: bool = False,
):
"""Gridsearch the optimal learning rate for the training.
Adapted from https://github.com/frgfm/Holocron/blob/master/holocron/trainer/core.py
"""
if num_it > len(train_loader):
raise ValueError("the value of `num_it` needs to be lower than the number of available batches")
# Update param groups & LR
gamma = (end_lr / start_lr) ** (1 / (num_it - 1))
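    # gamma is chosen so that num_it multiplicative steps sweep the learning rate
    # geometrically from start_lr up to exactly end_lr on the last step.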
optimizer.learning_rate = start_lr
lr_recorder = [start_lr * gamma**idx for idx in range(num_it)]
loss_recorder = []
for batch_idx, (images, targets) in enumerate(train_loader):
images = batch_transforms(images)
# Forward, Backward & update
with tf.GradientTape() as tape:
out = model(images, training=True)
train_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(targets, out)
grads = tape.gradient(train_loss, model.trainable_weights)
if amp:
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
optimizer.learning_rate = optimizer.learning_rate * gamma
# Record
train_loss = train_loss.numpy()
if np.any(np.isnan(train_loss)):
if batch_idx == 0:
raise ValueError("loss value is NaN or inf.")
else:
break
loss_recorder.append(train_loss.mean())
# Stop after the number of iterations
if batch_idx + 1 == num_it:
break
return lr_recorder[: len(loss_recorder)], loss_recorder
def fit_one_epoch(model, train_loader, batch_transforms, optimizer, mb, amp=False):
# Iterate over the batches of the dataset
for images, targets in progress_bar(train_loader, parent=mb):
images = batch_transforms(images)
with tf.GradientTape() as tape:
out = model(images, training=True)
train_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(targets, out)
grads = tape.gradient(train_loss, model.trainable_weights)
if amp:
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
mb.child.comment = f"Training loss: {train_loss.numpy().mean():.6}"
def evaluate(model, val_loader, batch_transforms):
# Validation loop
val_loss, correct, samples, batch_cnt = 0, 0, 0, 0
val_iter = iter(val_loader)
for images, targets in val_iter:
images = batch_transforms(images)
out = model(images, training=False)
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(targets, out)
# Compute metric
correct += int((out.numpy().argmax(1) == targets.numpy()).sum())
val_loss += loss.numpy().mean()
batch_cnt += 1
samples += images.shape[0]
val_loss /= batch_cnt
acc = correct / samples
return val_loss, acc
def collate_fn(samples):
images, targets = zip(*samples)
images = tf.stack(images, axis=0)
return images, tf.convert_to_tensor(targets)
def main(args):
print(args)
if args.push_to_hub:
login_to_hub()
if not isinstance(args.workers, int):
args.workers = min(16, mp.cpu_count())
vocab = VOCABS[args.vocab]
fonts = args.font.split(",")
# AMP
if args.amp:
mixed_precision.set_global_policy("mixed_float16")
# Load val data generator
st = time.time()
val_set = CharacterGenerator(
vocab=vocab,
num_samples=args.val_samples * len(vocab),
cache_samples=True,
img_transforms=T.Compose(
[
T.Resize((args.input_size, args.input_size)),
# Ensure we have a 90% split of white-background images
T.RandomApply(T.ColorInversion(), 0.9),
]
),
font_family=fonts,
)
val_loader = DataLoader(
val_set,
batch_size=args.batch_size,
shuffle=False,
drop_last=False,
num_workers=args.workers,
collate_fn=collate_fn,
)
print(
f"Validation set loaded in {time.time() - st:.4}s ({len(val_set)} samples in "
f"{val_loader.num_batches} batches)"
)
# Load doctr model
model = classification.__dict__[args.arch](
pretrained=args.pretrained,
input_shape=(args.input_size, args.input_size, 3),
num_classes=len(vocab),
classes=list(vocab),
include_top=True,
)
# Resume weights
if isinstance(args.resume, str):
model.load_weights(args.resume)
batch_transforms = T.Compose(
[
T.Normalize(mean=(0.694, 0.695, 0.693), std=(0.299, 0.296, 0.301)),
]
)
if args.test_only:
print("Running evaluation")
val_loss, acc = evaluate(model, val_loader, batch_transforms)
print(f"Validation loss: {val_loss:.6} (Acc: {acc:.2%})")
return
st = time.time()
# Load train data generator
train_set = CharacterGenerator(
vocab=vocab,
num_samples=args.train_samples * len(vocab),
cache_samples=True,
img_transforms=T.Compose(
[
T.Resize((args.input_size, args.input_size)),
# Augmentations
T.RandomApply(T.ColorInversion(), 0.9),
T.RandomApply(T.ToGray(3), 0.1),
T.RandomJpegQuality(60),
T.RandomSaturation(0.3),
T.RandomContrast(0.3),
T.RandomBrightness(0.3),
# Blur
T.RandomApply(T.GaussianBlur(kernel_shape=(3, 3), std=(0.1, 3)), 0.3),
]
),
font_family=fonts,
)
train_loader = DataLoader(
train_set,
batch_size=args.batch_size,
shuffle=True,
drop_last=True,
num_workers=args.workers,
collate_fn=collate_fn,
)
print(
f"Train set loaded in {time.time() - st:.4}s ({len(train_set)} samples in "
f"{train_loader.num_batches} batches)"
)
if args.show_samples:
x, target = next(iter(train_loader))
plot_samples(x, list(map(vocab.__getitem__, target)))
return
# Optimizer
scheduler = tf.keras.optimizers.schedules.ExponentialDecay(
args.lr,
decay_steps=args.epochs * len(train_loader),
decay_rate=1 / (1e3), # final lr as a fraction of initial lr
staircase=False,
)
optimizer = tf.keras.optimizers.Adam(
learning_rate=scheduler,
beta_1=0.95,
beta_2=0.99,
epsilon=1e-6,
)
if args.amp:
optimizer = mixed_precision.LossScaleOptimizer(optimizer)
# LR Finder
if args.find_lr:
lrs, losses = record_lr(model, train_loader, batch_transforms, optimizer, amp=args.amp)
plot_recorder(lrs, losses)
return
# Tensorboard to monitor training
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
exp_name = f"{args.arch}_{current_time}" if args.name is None else args.name
# W&B
if args.wb:
run = wandb.init(
name=exp_name,
project="character-classification",
config={
"learning_rate": args.lr,
"epochs": args.epochs,
"weight_decay": 0.0,
"batch_size": args.batch_size,
"architecture": args.arch,
"input_size": args.input_size,
"optimizer": "adam",
"framework": "tensorflow",
"vocab": args.vocab,
"scheduler": "exp_decay",
"pretrained": args.pretrained,
},
)
# Create loss queue
min_loss = np.inf
# Training loop
mb = master_bar(range(args.epochs))
for epoch in mb:
fit_one_epoch(model, train_loader, batch_transforms, optimizer, mb, args.amp)
# Validation loop at the end of each epoch
val_loss, acc = evaluate(model, val_loader, batch_transforms)
if val_loss < min_loss:
print(f"Validation loss decreased {min_loss:.6} --> {val_loss:.6}: saving state...")
model.save_weights(f"./{exp_name}/weights")
min_loss = val_loss
mb.write(f"Epoch {epoch + 1}/{args.epochs} - Validation loss: {val_loss:.6} (Acc: {acc:.2%})")
# W&B
if args.wb:
wandb.log(
{
"val_loss": val_loss,
"acc": acc,
}
)
if args.wb:
run.finish()
if args.push_to_hub:
push_to_hf_hub(model, exp_name, task="classification", run_config=args)
if args.export_onnx:
print("Exporting model to ONNX...")
if args.arch == "vit_b":
# fixed batch size for vit
dummy_input = [tf.TensorSpec([1, args.input_size, args.input_size, 3], tf.float32, name="input")]
else:
# dynamic batch size
dummy_input = [tf.TensorSpec([None, args.input_size, args.input_size, 3], tf.float32, name="input")]
model_path, _ = export_model_to_onnx(model, exp_name, dummy_input)
print(f"Exported model saved in {model_path}")
def parse_args():
import argparse
parser = argparse.ArgumentParser(
description="DocTR training script for character classification (TensorFlow)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("arch", type=str, help="text-recognition model to train")
parser.add_argument("--name", type=str, default=None, help="Name of your training experiment")
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train the model on")
parser.add_argument("-b", "--batch_size", type=int, default=64, help="batch size for training")
parser.add_argument("--input_size", type=int, default=32, help="input size H for the model, W = 4*H")
parser.add_argument("--lr", type=float, default=0.001, help="learning rate for the optimizer (Adam)")
parser.add_argument("-j", "--workers", type=int, default=None, help="number of workers used for dataloading")
parser.add_argument("--resume", type=str, default=None, help="Path to your checkpoint")
parser.add_argument(
"--font", type=str, default="FreeMono.ttf,FreeSans.ttf,FreeSerif.ttf", help="Font family to be used"
)
parser.add_argument("--vocab", type=str, default="french", help="Vocab to be used for training")
parser.add_argument(
"--train-samples",
dest="train_samples",
type=int,
default=1000,
help="Multiplied by the vocab length gets you the number of training samples that will be used.",
)
parser.add_argument(
"--val-samples",
dest="val_samples",
type=int,
default=20,
help="Multiplied by the vocab length gets you the number of validation samples that will be used.",
)
parser.add_argument("--test-only", dest="test_only", action="store_true", help="Run the validation loop")
parser.add_argument(
"--show-samples", dest="show_samples", action="store_true", help="Display unormalized training samples"
)
parser.add_argument("--wb", dest="wb", action="store_true", help="Log to Weights & Biases")
parser.add_argument("--push-to-hub", dest="push_to_hub", action="store_true", help="Push to Huggingface Hub")
parser.add_argument(
"--pretrained",
dest="pretrained",
action="store_true",
help="Load pretrained parameters before starting the training",
)
parser.add_argument("--export-onnx", dest="export_onnx", action="store_true", help="Export the model to ONNX")
parser.add_argument("--amp", dest="amp", help="Use Automatic Mixed Precision", action="store_true")
parser.add_argument("--find-lr", action="store_true", help="Gridsearch the optimal LR")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import math
import matplotlib.pyplot as plt
import numpy as np
def plot_samples(images, targets):
# Unnormalize image
num_samples = min(len(images), 12)
num_cols = min(len(images), 8)
num_rows = int(math.ceil(num_samples / num_cols))
_, axes = plt.subplots(num_rows, num_cols, figsize=(20, 5))
for idx in range(num_samples):
img = (255 * images[idx].numpy()).round().clip(0, 255).astype(np.uint8)
if img.shape[0] == 3 and img.shape[2] != 3:
img = img.transpose(1, 2, 0)
row_idx = idx // num_cols
col_idx = idx % num_cols
ax = axes[row_idx] if num_rows > 1 else axes
ax = ax[col_idx] if num_cols > 1 else ax
ax.imshow(img)
ax.set_title(targets[idx])
# Disable axis
for ax in axes.ravel():
ax.axis("off")
plt.show()
def plot_recorder(lr_recorder, loss_recorder, beta: float = 0.95, **kwargs) -> None:
"""Display the results of the LR grid search.
Adapted from https://github.com/frgfm/Holocron/blob/master/holocron/trainer/core.py
Args:
lr_recorder: list of LR values
loss_recorder: list of loss values
beta (float, optional): smoothing factor
"""
if len(lr_recorder) != len(loss_recorder) or len(lr_recorder) == 0:
raise AssertionError("Both `lr_recorder` and `loss_recorder` should have the same length")
# Exp moving average of loss
smoothed_losses = []
avg_loss = 0.0
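    # Bias-corrected exponential moving average (the same de-biasing used by Adam),
    # so the first few smoothed points are not dragged toward the zero initialization.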
for idx, loss in enumerate(loss_recorder):
avg_loss = beta * avg_loss + (1 - beta) * loss
smoothed_losses.append(avg_loss / (1 - beta ** (idx + 1)))
# Properly rescale Y-axis
data_slice = slice(
min(len(loss_recorder) // 10, 10),
-min(len(loss_recorder) // 20, 5) if len(loss_recorder) >= 20 else len(loss_recorder),
)
vals = np.array(smoothed_losses[data_slice])
min_idx = vals.argmin()
max_val = vals.max() if min_idx is None else vals[: min_idx + 1].max() # type: ignore[misc]
delta = max_val - vals[min_idx]
plt.plot(lr_recorder[data_slice], smoothed_losses[data_slice])
plt.xscale("log")
plt.xlabel("Learning Rate")
plt.ylabel("Training loss")
plt.ylim(vals[min_idx] - 0.1 * delta, max_val + 0.2 * delta)
plt.grid(True, linestyle="--", axis="x")
plt.show(**kwargs)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
os.environ["USE_TORCH"] = "1"
import datetime
import logging
import multiprocessing as mp
import time
import numpy as np
import torch
import wandb
from fastprogress.fastprogress import master_bar, progress_bar
from torch.nn.functional import cross_entropy
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiplicativeLR, OneCycleLR
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torchvision.transforms import (
ColorJitter,
Compose,
GaussianBlur,
Grayscale,
InterpolationMode,
Normalize,
RandomRotation,
)
from doctr import transforms as T
from doctr.datasets import VOCABS, CharacterGenerator
from doctr.models import classification, login_to_hub, push_to_hf_hub
from doctr.models.utils import export_model_to_onnx
from utils import plot_recorder, plot_samples
def record_lr(
model: torch.nn.Module,
train_loader: DataLoader,
batch_transforms,
optimizer,
start_lr: float = 1e-7,
end_lr: float = 1,
num_it: int = 100,
amp: bool = False,
):
"""Gridsearch the optimal learning rate for the training.
Adapted from https://github.com/frgfm/Holocron/blob/master/holocron/trainer/core.py
"""
if num_it > len(train_loader):
raise ValueError("the value of `num_it` needs to be lower than the number of available batches")
model = model.train()
# Update param groups & LR
optimizer.defaults["lr"] = start_lr
for pgroup in optimizer.param_groups:
pgroup["lr"] = start_lr
gamma = (end_lr / start_lr) ** (1 / (num_it - 1))
scheduler = MultiplicativeLR(optimizer, lambda step: gamma)
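    # MultiplicativeLR multiplies the LR by gamma on every scheduler.step(), which
    # reproduces the geometric schedule precomputed in lr_recorder below.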
lr_recorder = [start_lr * gamma**idx for idx in range(num_it)]
loss_recorder = []
if amp:
scaler = torch.cuda.amp.GradScaler()
for batch_idx, (images, targets) in enumerate(train_loader):
if torch.cuda.is_available():
images = images.cuda()
targets = targets.cuda()
images = batch_transforms(images)
# Forward, Backward & update
optimizer.zero_grad()
if amp:
with torch.cuda.amp.autocast():
out = model(images)
train_loss = cross_entropy(out, targets)
scaler.scale(train_loss).backward()
# Update the params
scaler.step(optimizer)
scaler.update()
else:
out = model(images)
train_loss = cross_entropy(out, targets)
train_loss.backward()
optimizer.step()
# Update LR
scheduler.step()
# Record
if not torch.isfinite(train_loss):
if batch_idx == 0:
raise ValueError("loss value is NaN or inf.")
else:
break
loss_recorder.append(train_loss.item())
# Stop after the number of iterations
if batch_idx + 1 == num_it:
break
return lr_recorder[: len(loss_recorder)], loss_recorder
def fit_one_epoch(model, train_loader, batch_transforms, optimizer, scheduler, mb, amp=False):
if amp:
scaler = torch.cuda.amp.GradScaler()
model.train()
# Iterate over the batches of the dataset
for images, targets in progress_bar(train_loader, parent=mb):
if torch.cuda.is_available():
images = images.cuda()
targets = targets.cuda()
images = batch_transforms(images)
optimizer.zero_grad()
if amp:
with torch.cuda.amp.autocast():
out = model(images)
train_loss = cross_entropy(out, targets)
scaler.scale(train_loss).backward()
# Update the params
scaler.step(optimizer)
scaler.update()
else:
out = model(images)
train_loss = cross_entropy(out, targets)
train_loss.backward()
optimizer.step()
scheduler.step()
mb.child.comment = f"Training loss: {train_loss.item():.6}"
@torch.no_grad()
def evaluate(model, val_loader, batch_transforms, amp=False):
# Model in eval mode
model.eval()
# Validation loop
val_loss, correct, samples, batch_cnt = 0, 0, 0, 0
for images, targets in val_loader:
images = batch_transforms(images)
if torch.cuda.is_available():
images = images.cuda()
targets = targets.cuda()
if amp:
with torch.cuda.amp.autocast():
out = model(images)
loss = cross_entropy(out, targets)
else:
out = model(images)
loss = cross_entropy(out, targets)
# Compute metric
correct += (out.argmax(dim=1) == targets).sum().item()
val_loss += loss.item()
batch_cnt += 1
samples += images.shape[0]
val_loss /= batch_cnt
acc = correct / samples
return val_loss, acc
def main(args):
print(args)
if args.push_to_hub:
login_to_hub()
if not isinstance(args.workers, int):
args.workers = min(16, mp.cpu_count())
torch.backends.cudnn.benchmark = True
vocab = VOCABS[args.vocab]
fonts = args.font.split(",")
# Load val data generator
st = time.time()
val_set = CharacterGenerator(
vocab=vocab,
num_samples=args.val_samples * len(vocab),
cache_samples=True,
img_transforms=Compose(
[
T.Resize((args.input_size, args.input_size)),
# Ensure we have a 90% split of white-background images
T.RandomApply(T.ColorInversion(), 0.9),
]
),
font_family=fonts,
)
val_loader = DataLoader(
val_set,
batch_size=args.batch_size,
drop_last=False,
num_workers=args.workers,
sampler=SequentialSampler(val_set),
pin_memory=torch.cuda.is_available(),
)
print(f"Validation set loaded in {time.time() - st:.4}s ({len(val_set)} samples in " f"{len(val_loader)} batches)")
batch_transforms = Normalize(mean=(0.694, 0.695, 0.693), std=(0.299, 0.296, 0.301))
# Load doctr model
model = classification.__dict__[args.arch](pretrained=args.pretrained, num_classes=len(vocab), classes=list(vocab))
# Resume weights
if isinstance(args.resume, str):
print(f"Resuming {args.resume}")
checkpoint = torch.load(args.resume, map_location="cpu")
model.load_state_dict(checkpoint)
# GPU
if isinstance(args.device, int):
if not torch.cuda.is_available():
raise AssertionError("PyTorch cannot access your GPU. Please investigate!")
if args.device >= torch.cuda.device_count():
raise ValueError("Invalid device index")
# Silent default switch to GPU if available
elif torch.cuda.is_available():
args.device = 0
else:
logging.warning("No accessible GPU, targe device set to CPU.")
if torch.cuda.is_available():
torch.cuda.set_device(args.device)
model = model.cuda()
if args.test_only:
print("Running evaluation")
val_loss, acc = evaluate(model, val_loader, batch_transforms)
print(f"Validation loss: {val_loss:.6} (Acc: {acc:.2%})")
return
st = time.time()
# Load train data generator
train_set = CharacterGenerator(
vocab=vocab,
num_samples=args.train_samples * len(vocab),
cache_samples=True,
img_transforms=Compose(
[
T.Resize((args.input_size, args.input_size)),
# Augmentations
T.RandomApply(T.ColorInversion(), 0.9),
# GaussianNoise
T.RandomApply(Grayscale(3), 0.1),
ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.02),
T.RandomApply(GaussianBlur(kernel_size=(3, 3), sigma=(0.1, 3)), 0.3),
RandomRotation(15, interpolation=InterpolationMode.BILINEAR),
]
),
font_family=fonts,
)
train_loader = DataLoader(
train_set,
batch_size=args.batch_size,
drop_last=True,
num_workers=args.workers,
sampler=RandomSampler(train_set),
pin_memory=torch.cuda.is_available(),
)
print(f"Train set loaded in {time.time() - st:.4}s ({len(train_set)} samples in " f"{len(train_loader)} batches)")
if args.show_samples:
x, target = next(iter(train_loader))
plot_samples(x, list(map(vocab.__getitem__, target)))
return
# Optimizer
optimizer = torch.optim.Adam(
[p for p in model.parameters() if p.requires_grad],
args.lr,
betas=(0.95, 0.99),
eps=1e-6,
weight_decay=args.weight_decay,
)
# LR Finder
if args.find_lr:
lrs, losses = record_lr(model, train_loader, batch_transforms, optimizer, amp=args.amp)
plot_recorder(lrs, losses)
return
# Scheduler
if args.sched == "cosine":
scheduler = CosineAnnealingLR(optimizer, args.epochs * len(train_loader), eta_min=args.lr / 25e4)
elif args.sched == "onecycle":
scheduler = OneCycleLR(optimizer, args.lr, args.epochs * len(train_loader))
# Training monitoring
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
exp_name = f"{args.arch}_{current_time}" if args.name is None else args.name
# W&B
if args.wb:
run = wandb.init(
name=exp_name,
project="character-classification",
config={
"learning_rate": args.lr,
"epochs": args.epochs,
"weight_decay": args.weight_decay,
"batch_size": args.batch_size,
"architecture": args.arch,
"input_size": args.input_size,
"optimizer": "adam",
"framework": "pytorch",
"vocab": args.vocab,
"scheduler": args.sched,
"pretrained": args.pretrained,
},
)
    # Track the best validation loss
min_loss = np.inf
# Training loop
mb = master_bar(range(args.epochs))
for epoch in mb:
fit_one_epoch(model, train_loader, batch_transforms, optimizer, scheduler, mb)
# Validation loop at the end of each epoch
val_loss, acc = evaluate(model, val_loader, batch_transforms)
if val_loss < min_loss:
print(f"Validation loss decreased {min_loss:.6} --> {val_loss:.6}: saving state...")
torch.save(model.state_dict(), f"./{exp_name}.pt")
min_loss = val_loss
mb.write(f"Epoch {epoch + 1}/{args.epochs} - Validation loss: {val_loss:.6} (Acc: {acc:.2%})")
# W&B
if args.wb:
wandb.log(
{
"val_loss": val_loss,
"acc": acc,
}
)
if args.wb:
run.finish()
if args.push_to_hub:
push_to_hf_hub(model, exp_name, task="classification", run_config=args)
if args.export_onnx:
print("Exporting model to ONNX...")
dummy_batch = next(iter(val_loader))
dummy_input = dummy_batch[0].cuda() if torch.cuda.is_available() else dummy_batch[0]
model_path = export_model_to_onnx(model, exp_name, dummy_input)
print(f"Exported model saved in {model_path}")
def parse_args():
import argparse
parser = argparse.ArgumentParser(
description="DocTR training script for character classification (PyTorch)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("arch", type=str, help="text-recognition model to train")
parser.add_argument("--name", type=str, default=None, help="Name of your training experiment")
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train the model on")
parser.add_argument("-b", "--batch_size", type=int, default=64, help="batch size for training")
parser.add_argument("--device", default=None, type=int, help="device")
parser.add_argument("--input_size", type=int, default=32, help="input size H for the model, W = H")
parser.add_argument("--lr", type=float, default=0.001, help="learning rate for the optimizer (Adam)")
parser.add_argument("--wd", "--weight-decay", default=0, type=float, help="weight decay", dest="weight_decay")
parser.add_argument("-j", "--workers", type=int, default=None, help="number of workers used for dataloading")
parser.add_argument("--resume", type=str, default=None, help="Path to your checkpoint")
parser.add_argument(
"--font", type=str, default="FreeMono.ttf,FreeSans.ttf,FreeSerif.ttf", help="Font family to be used"
)
parser.add_argument("--vocab", type=str, default="french", help="Vocab to be used for training")
parser.add_argument(
"--train-samples",
dest="train_samples",
type=int,
default=1000,
help="Multiplied by the vocab length gets you the number of training samples that will be used.",
)
parser.add_argument(
"--val-samples",
dest="val_samples",
type=int,
default=20,
help="Multiplied by the vocab length gets you the number of validation samples that will be used.",
)
parser.add_argument("--test-only", dest="test_only", action="store_true", help="Run the validation loop")
parser.add_argument(
"--show-samples", dest="show_samples", action="store_true", help="Display unormalized training samples"
)
parser.add_argument("--wb", dest="wb", action="store_true", help="Log to Weights & Biases")
parser.add_argument("--push-to-hub", dest="push_to_hub", action="store_true", help="Push to Huggingface Hub")
parser.add_argument(
"--pretrained",
dest="pretrained",
action="store_true",
help="Load pretrained parameters before starting the training",
)
parser.add_argument("--export-onnx", dest="export_onnx", action="store_true", help="Export the model to ONNX")
parser.add_argument("--sched", type=str, default="cosine", help="scheduler to use")
parser.add_argument("--amp", dest="amp", help="Use Automatic Mixed Precision", action="store_true")
parser.add_argument("--find-lr", action="store_true", help="Gridsearch the optimal LR")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
"""
Text recognition latency benchmark
"""
import argparse
import os
import time
os.environ["USE_TF"] = "1"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import numpy as np
import tensorflow as tf
from doctr.models import recognition
def main(args):
if args.gpu:
gpu_devices = tf.config.experimental.list_physical_devices("GPU")
if any(gpu_devices):
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
else:
raise AssertionError("TensorFlow cannot access your GPU. Please investigate!")
else:
os.environ["CUDA_VISIBLE_DEVICES"] = ""
spatial_shape = (args.size, 4 * args.size)
# Pretrained imagenet model
model = recognition.__dict__[args.arch](
pretrained=args.pretrained,
pretrained_backbone=False,
input_shape=(*spatial_shape, 3),
)
# Input
img_tensor = tf.random.uniform(shape=[args.batch_size, *spatial_shape, 3], maxval=1, dtype=tf.float32)
# Warmup
for _ in range(10):
_ = model(img_tensor, training=False)
timings = []
# Evaluation runs
for _ in range(args.it):
start_ts = time.perf_counter()
_ = model(img_tensor, training=False)
timings.append(time.perf_counter() - start_ts)
_timings = np.array(timings)
print(f"{args.arch} ({args.it} runs on {spatial_shape} inputs in batches of {args.batch_size})")
print(f"mean {1000 * _timings.mean():.2f}ms, std {1000 * _timings.std():.2f}ms")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="docTR latency benchmark for text recognition (TensorFlow)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("arch", type=str, help="Architecture to use")
parser.add_argument("--batch-size", "-b", type=int, default=64, help="The batch_size")
parser.add_argument("--size", type=int, default=32, help="The image input size")
parser.add_argument("--gpu", dest="gpu", help="Should the benchmark be performed on GPU", action="store_true")
parser.add_argument("--it", type=int, default=100, help="Number of iterations to run")
parser.add_argument(
"--pretrained", dest="pretrained", help="Use pre-trained models from the modelzoo", action="store_true"
)
args = parser.parse_args()
main(args)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
"""
Text recognition latency benchmark
"""
import argparse
import os
import time
import numpy as np
import torch
os.environ["USE_TORCH"] = "1"
from doctr.models import recognition
@torch.no_grad()
def main(args):
device = torch.device("cuda:0" if args.gpu else "cpu")
# Pretrained imagenet model
model = (
recognition.__dict__[args.arch](
pretrained=args.pretrained,
pretrained_backbone=False,
)
.eval()
.to(device=device)
)
# Input
img_tensor = torch.rand((args.batch_size, 3, args.size, 4 * args.size)).to(device=device)
# Warmup
for _ in range(10):
_ = model(img_tensor)
timings = []
# Evaluation runs
for _ in range(args.it):
start_ts = time.perf_counter()
_ = model(img_tensor)
timings.append(time.perf_counter() - start_ts)
_timings = np.array(timings)
print(f"{args.arch} ({args.it} runs on ({args.size}, {4 * args.size}) inputs in batches of {args.batch_size})")
print(f"mean {1000 * _timings.mean():.2f}ms, std {1000 * _timings.std():.2f}ms")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="docTR latency benchmark for text recognition (PyTorch)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("arch", type=str, help="Architecture to use")
parser.add_argument("--batch-size", "-b", type=int, default=64, help="The batch_size")
parser.add_argument("--size", type=int, default=32, help="The image input size")
parser.add_argument("--gpu", dest="gpu", help="Should the benchmark be performed on GPU", action="store_true")
parser.add_argument("--it", type=int, default=100, help="Number of iterations to run")
parser.add_argument(
"--pretrained", dest="pretrained", help="Use pre-trained models from the modelzoo", action="store_true"
)
args = parser.parse_args()
main(args)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
os.environ["USE_TF"] = "1"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import datetime
import hashlib
import multiprocessing as mp
import time
from pathlib import Path
import numpy as np
import tensorflow as tf
import wandb
from fastprogress.fastprogress import master_bar, progress_bar
from tensorflow.keras import mixed_precision
from doctr.models import login_to_hub, push_to_hf_hub
gpu_devices = tf.config.experimental.list_physical_devices("GPU")
if any(gpu_devices):
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
from doctr import transforms as T
from doctr.datasets import VOCABS, DataLoader, RecognitionDataset, WordGenerator
from doctr.models import recognition
from doctr.utils.metrics import TextMatch
from utils import plot_recorder, plot_samples
def record_lr(
model: tf.keras.Model,
train_loader: DataLoader,
batch_transforms,
optimizer,
start_lr: float = 1e-7,
end_lr: float = 1,
num_it: int = 100,
amp: bool = False,
):
"""Gridsearch the optimal learning rate for the training.
Adapted from https://github.com/frgfm/Holocron/blob/master/holocron/trainer/core.py
"""
if num_it > len(train_loader):
raise ValueError("the value of `num_it` needs to be lower than the number of available batches")
# Update param groups & LR
gamma = (end_lr / start_lr) ** (1 / (num_it - 1))
optimizer.learning_rate = start_lr
lr_recorder = [start_lr * gamma**idx for idx in range(num_it)]
loss_recorder = []
for batch_idx, (images, targets) in enumerate(train_loader):
images = batch_transforms(images)
# Forward, Backward & update
with tf.GradientTape() as tape:
train_loss = model(images, targets, training=True)["loss"]
grads = tape.gradient(train_loss, model.trainable_weights)
if amp:
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
optimizer.learning_rate = optimizer.learning_rate * gamma
# Record
train_loss = train_loss.numpy()
if np.any(np.isnan(train_loss)):
if batch_idx == 0:
raise ValueError("loss value is NaN or inf.")
else:
break
loss_recorder.append(train_loss.mean())
# Stop after the number of iterations
if batch_idx + 1 == num_it:
break
return lr_recorder[: len(loss_recorder)], loss_recorder
def fit_one_epoch(model, train_loader, batch_transforms, optimizer, mb, amp=False):
train_iter = iter(train_loader)
# Iterate over the batches of the dataset
for images, targets in progress_bar(train_iter, parent=mb):
images = batch_transforms(images)
with tf.GradientTape() as tape:
train_loss = model(images, targets, training=True)["loss"]
grads = tape.gradient(train_loss, model.trainable_weights)
if amp:
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
mb.child.comment = f"Training loss: {train_loss.numpy().mean():.6}"
def evaluate(model, val_loader, batch_transforms, val_metric):
# Reset val metric
val_metric.reset()
# Validation loop
val_loss, batch_cnt = 0, 0
val_iter = iter(val_loader)
for images, targets in val_iter:
images = batch_transforms(images)
out = model(images, targets, return_preds=True, training=False)
# Compute metric
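        # out["preds"] is expected to hold (word, confidence) pairs; only the words
        # are fed to the text-match metric.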
if len(out["preds"]):
words, _ = zip(*out["preds"])
else:
words = []
val_metric.update(targets, words)
val_loss += out["loss"].numpy().mean()
batch_cnt += 1
val_loss /= batch_cnt
result = val_metric.summary()
return val_loss, result["raw"], result["unicase"]
def main(args):
print(args)
if args.push_to_hub:
login_to_hub()
if not isinstance(args.workers, int):
args.workers = min(16, mp.cpu_count())
vocab = VOCABS[args.vocab]
fonts = args.font.split(",")
# AMP
if args.amp:
mixed_precision.set_global_policy("mixed_float16")
st = time.time()
if isinstance(args.val_path, str):
with open(os.path.join(args.val_path, "labels.json"), "rb") as f:
val_hash = hashlib.sha256(f.read()).hexdigest()
# Load val data generator
val_set = RecognitionDataset(
img_folder=os.path.join(args.val_path, "images"),
labels_path=os.path.join(args.val_path, "labels.json"),
img_transforms=T.Resize((args.input_size, 4 * args.input_size), preserve_aspect_ratio=True),
)
else:
val_hash = None
# Load synthetic data generator
val_set = WordGenerator(
vocab=vocab,
min_chars=args.min_chars,
max_chars=args.max_chars,
num_samples=args.val_samples * len(vocab),
font_family=fonts,
img_transforms=T.Compose(
[
T.Resize((args.input_size, 4 * args.input_size), preserve_aspect_ratio=True),
# Ensure we have a 90% split of white-background images
T.RandomApply(T.ColorInversion(), 0.9),
]
),
)
val_loader = DataLoader(
val_set,
batch_size=args.batch_size,
shuffle=False,
drop_last=False,
num_workers=args.workers,
)
print(
f"Validation set loaded in {time.time() - st:.4}s ({len(val_set)} samples in "
f"{val_loader.num_batches} batches)"
)
# Load doctr model
model = recognition.__dict__[args.arch](
pretrained=args.pretrained,
input_shape=(args.input_size, 4 * args.input_size, 3),
vocab=vocab,
)
# Resume weights
if isinstance(args.resume, str):
model.load_weights(args.resume)
# Metrics
val_metric = TextMatch()
batch_transforms = T.Compose(
[
T.Normalize(mean=(0.694, 0.695, 0.693), std=(0.299, 0.296, 0.301)),
]
)
if args.test_only:
print("Running evaluation")
val_loss, exact_match, partial_match = evaluate(model, val_loader, batch_transforms, val_metric)
print(f"Validation loss: {val_loss:.6} (Exact: {exact_match:.2%} | Partial: {partial_match:.2%})")
return
st = time.time()
if isinstance(args.train_path, str):
# Load train data generator
base_path = Path(args.train_path)
parts = (
[base_path]
if base_path.joinpath("labels.json").is_file()
else [base_path.joinpath(sub) for sub in os.listdir(base_path)]
)
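        # Either a single dataset folder (labels.json at its root) or a folder of
        # such sub-folders is accepted; extra parts are merged into the first below.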
with open(parts[0].joinpath("labels.json"), "rb") as f:
train_hash = hashlib.sha256(f.read()).hexdigest()
train_set = RecognitionDataset(
parts[0].joinpath("images"),
parts[0].joinpath("labels.json"),
img_transforms=T.Compose(
[
T.RandomApply(T.ColorInversion(), 0.1),
T.Resize((args.input_size, 4 * args.input_size), preserve_aspect_ratio=True),
# Augmentations
T.RandomJpegQuality(60),
T.RandomSaturation(0.3),
T.RandomContrast(0.3),
T.RandomBrightness(0.3),
]
),
)
if len(parts) > 1:
for subfolder in parts[1:]:
train_set.merge_dataset(
RecognitionDataset(subfolder.joinpath("images"), subfolder.joinpath("labels.json"))
)
else:
train_hash = None
# Load synthetic data generator
train_set = WordGenerator(
vocab=vocab,
min_chars=args.min_chars,
max_chars=args.max_chars,
num_samples=args.train_samples * len(vocab),
font_family=fonts,
img_transforms=T.Compose(
[
T.Resize((args.input_size, 4 * args.input_size), preserve_aspect_ratio=True),
# Ensure we have a 90% split of white-background images
T.RandomApply(T.ColorInversion(), 0.9),
T.RandomJpegQuality(60),
T.RandomSaturation(0.3),
T.RandomContrast(0.3),
T.RandomBrightness(0.3),
]
),
)
train_loader = DataLoader(
train_set,
batch_size=args.batch_size,
shuffle=True,
drop_last=True,
num_workers=args.workers,
)
print(
f"Train set loaded in {time.time() - st:.4}s ({len(train_set)} samples in "
f"{train_loader.num_batches} batches)"
)
if args.show_samples:
x, target = next(iter(train_loader))
plot_samples(x, target)
return
# Optimizer
scheduler = tf.keras.optimizers.schedules.ExponentialDecay(
args.lr,
decay_steps=args.epochs * len(train_loader),
decay_rate=1 / (25e4), # final lr as a fraction of initial lr
staircase=False,
)
optimizer = tf.keras.optimizers.Adam(learning_rate=scheduler, beta_1=0.95, beta_2=0.99, epsilon=1e-6, clipnorm=5)
if args.amp:
optimizer = mixed_precision.LossScaleOptimizer(optimizer)
# LR Finder
if args.find_lr:
lrs, losses = record_lr(model, train_loader, batch_transforms, optimizer, amp=args.amp)
plot_recorder(lrs, losses)
return
# Tensorboard to monitor training
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
exp_name = f"{args.arch}_{current_time}" if args.name is None else args.name
# W&B
if args.wb:
run = wandb.init(
name=exp_name,
project="text-recognition",
config={
"learning_rate": args.lr,
"epochs": args.epochs,
"weight_decay": 0.0,
"batch_size": args.batch_size,
"architecture": args.arch,
"input_size": args.input_size,
"optimizer": "adam",
"framework": "tensorflow",
"scheduler": "exp_decay",
"vocab": args.vocab,
"train_hash": train_hash,
"val_hash": val_hash,
"pretrained": args.pretrained,
},
)
min_loss = np.inf
# Training loop
mb = master_bar(range(args.epochs))
for epoch in mb:
fit_one_epoch(model, train_loader, batch_transforms, optimizer, mb, args.amp)
# Validation loop at the end of each epoch
val_loss, exact_match, partial_match = evaluate(model, val_loader, batch_transforms, val_metric)
if val_loss < min_loss:
print(f"Validation loss decreased {min_loss:.6} --> {val_loss:.6}: saving state...")
model.save_weights(f"./{exp_name}/weights")
min_loss = val_loss
mb.write(
f"Epoch {epoch + 1}/{args.epochs} - Validation loss: {val_loss:.6} "
f"(Exact: {exact_match:.2%} | Partial: {partial_match:.2%})"
)
# W&B
if args.wb:
wandb.log(
{
"val_loss": val_loss,
"exact_match": exact_match,
"partial_match": partial_match,
}
)
if args.wb:
run.finish()
if args.push_to_hub:
push_to_hf_hub(model, exp_name, task="recognition", run_config=args)
def parse_args():
import argparse
parser = argparse.ArgumentParser(
description="DocTR training script for text recognition (TensorFlow)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("arch", type=str, help="text-recognition model to train")
parser.add_argument("--train_path", type=str, default=None, help="path to train data folder(s)")
parser.add_argument("--val_path", type=str, default=None, help="path to val data folder")
parser.add_argument(
"--train-samples",
type=int,
default=1000,
help="Multiplied by the vocab length gets you the number of synthetic training samples that will be used.",
)
parser.add_argument(
"--val-samples",
type=int,
default=20,
help="Multiplied by the vocab length gets you the number of synthetic validation samples that will be used.",
)
parser.add_argument(
"--font", type=str, default="FreeMono.ttf,FreeSans.ttf,FreeSerif.ttf", help="Font family to be used"
)
parser.add_argument("--min-chars", type=int, default=1, help="Minimum number of characters per synthetic sample")
parser.add_argument("--max-chars", type=int, default=12, help="Maximum number of characters per synthetic sample")
parser.add_argument("--name", type=str, default=None, help="Name of your training experiment")
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train the model on")
parser.add_argument("-b", "--batch_size", type=int, default=64, help="batch size for training")
parser.add_argument("--input_size", type=int, default=32, help="input size H for the model, W = 4*H")
parser.add_argument("--lr", type=float, default=0.001, help="learning rate for the optimizer (Adam)")
parser.add_argument("-j", "--workers", type=int, default=None, help="number of workers used for dataloading")
parser.add_argument("--resume", type=str, default=None, help="Path to your checkpoint")
parser.add_argument("--vocab", type=str, default="french", help="Vocab to be used for training")
parser.add_argument("--test-only", dest="test_only", action="store_true", help="Run the validation loop")
parser.add_argument(
"--show-samples", dest="show_samples", action="store_true", help="Display unormalized training samples"
)
parser.add_argument("--wb", dest="wb", action="store_true", help="Log to Weights & Biases")
parser.add_argument("--push-to-hub", dest="push_to_hub", action="store_true", help="Push to Huggingface Hub")
parser.add_argument(
"--pretrained",
dest="pretrained",
action="store_true",
help="Load pretrained parameters before starting the training",
)
parser.add_argument("--amp", dest="amp", help="Use Automatic Mixed Precision", action="store_true")
parser.add_argument("--find-lr", action="store_true", help="Gridsearch the optimal LR")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
os.environ["USE_TORCH"] = "1"
import multiprocessing as mp
import time
import torch
from torch.utils.data import DataLoader, SequentialSampler
from torchvision.transforms import Normalize
from tqdm import tqdm
from doctr import datasets
from doctr import transforms as T
from doctr.datasets import VOCABS
from doctr.models import recognition
from doctr.utils.metrics import TextMatch
@torch.no_grad()
def evaluate(model, val_loader, batch_transforms, val_metric, amp=False):
# Model in eval mode
model.eval()
# Reset val metric
val_metric.reset()
# Validation loop
val_loss, batch_cnt = 0, 0
for images, targets in tqdm(val_loader):
try:
if torch.cuda.is_available():
images = images.cuda()
images = batch_transforms(images)
if amp:
with torch.cuda.amp.autocast():
out = model(images, targets, return_preds=True)
else:
out = model(images, targets, return_preds=True)
# Compute metric
if len(out["preds"]):
words, _ = zip(*out["preds"])
else:
words = []
val_metric.update(targets, words)
val_loss += out["loss"].item()
batch_cnt += 1
except ValueError:
print(f"unexpected symbol/s in targets:\n{targets} \n--> skip batch")
continue
val_loss /= batch_cnt
result = val_metric.summary()
return val_loss, result["raw"], result["unicase"]
def main(args):
print(args)
torch.backends.cudnn.benchmark = True
if not isinstance(args.workers, int):
args.workers = min(16, mp.cpu_count())
# Load doctr model
model = recognition.__dict__[args.arch](
        pretrained=args.resume is None,
input_shape=(3, args.input_size, 4 * args.input_size),
vocab=VOCABS[args.vocab],
).eval()
# Resume weights
if isinstance(args.resume, str):
print(f"Resuming {args.resume}")
checkpoint = torch.load(args.resume, map_location="cpu")
model.load_state_dict(checkpoint)
st = time.time()
ds = datasets.__dict__[args.dataset](
train=True,
download=True,
recognition_task=True,
use_polygons=args.regular,
img_transforms=T.Resize((args.input_size, 4 * args.input_size), preserve_aspect_ratio=True),
)
_ds = datasets.__dict__[args.dataset](
train=False,
download=True,
recognition_task=True,
use_polygons=args.regular,
img_transforms=T.Resize((args.input_size, 4 * args.input_size), preserve_aspect_ratio=True),
)
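    # Merge the train and test splits to evaluate on the full dataset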
ds.data.extend([(np_img, target) for np_img, target in _ds.data])
test_loader = DataLoader(
ds,
batch_size=args.batch_size,
drop_last=False,
num_workers=args.workers,
sampler=SequentialSampler(ds),
pin_memory=torch.cuda.is_available(),
collate_fn=ds.collate_fn,
)
print(f"Test set loaded in {time.time() - st:.4}s ({len(ds)} samples in " f"{len(test_loader)} batches)")
mean, std = model.cfg["mean"], model.cfg["std"]
batch_transforms = Normalize(mean=mean, std=std)
# Metrics
val_metric = TextMatch()
# GPU
if isinstance(args.device, int):
if not torch.cuda.is_available():
raise AssertionError("PyTorch cannot access your GPU. Please investigate!")
if args.device >= torch.cuda.device_count():
raise ValueError("Invalid device index")
# Silent default switch to GPU if available
elif torch.cuda.is_available():
args.device = 0
else:
print("No accessible GPU, targe device set to CPU.")
if torch.cuda.is_available():
torch.cuda.set_device(args.device)
model = model.cuda()
print("Running evaluation")
val_loss, exact_match, partial_match = evaluate(model, test_loader, batch_transforms, val_metric, amp=args.amp)
print(f"Validation loss: {val_loss:.6} (Exact: {exact_match:.2%} | Partial: {partial_match:.2%})")
def parse_args():
import argparse
parser = argparse.ArgumentParser(
description="docTR evaluation script for text recognition (PyTorch)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("arch", type=str, help="text-recognition model to evaluate")
parser.add_argument("--vocab", type=str, default="french", help="Vocab to be used for evaluation")
parser.add_argument("--dataset", type=str, default="FUNSD", help="Dataset to evaluate on")
parser.add_argument("--device", default=None, type=int, help="device")
parser.add_argument("-b", "--batch_size", type=int, default=1, help="batch size for evaluation")
parser.add_argument("--input_size", type=int, default=32, help="input size H for the model, W = 4*H")
parser.add_argument("-j", "--workers", type=int, default=None, help="number of workers used for dataloading")
parser.add_argument(
"--only_regular", dest="regular", action="store_true", help="test set contains only regular text"
)
parser.add_argument("--resume", type=str, default=None, help="Checkpoint to resume")
parser.add_argument("--amp", dest="amp", help="Use Automatic Mixed Precision", action="store_true")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
os.environ["USE_TF"] = "1"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import multiprocessing as mp
import time
import tensorflow as tf
from tensorflow.keras import mixed_precision
from tqdm import tqdm
gpu_devices = tf.config.experimental.list_physical_devices("GPU")
if any(gpu_devices):
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
from doctr import datasets
from doctr import transforms as T
from doctr.datasets import VOCABS, DataLoader
from doctr.models import recognition
from doctr.utils.metrics import TextMatch
def evaluate(model, val_loader, batch_transforms, val_metric):
# Reset val metric
val_metric.reset()
# Validation loop
val_loss, batch_cnt = 0, 0
val_iter = iter(val_loader)
for images, targets in tqdm(val_iter):
try:
images = batch_transforms(images)
out = model(images, targets, return_preds=True, training=False)
# Compute metric
if len(out["preds"]):
words, _ = zip(*out["preds"])
else:
words = []
val_metric.update(targets, words)
val_loss += out["loss"].numpy().mean()
batch_cnt += 1
except ValueError:
print(f"unexpected symbol/s in targets:\n{targets} \n--> skip batch")
continue
val_loss /= batch_cnt
result = val_metric.summary()
return val_loss, result["raw"], result["unicase"]
def main(args):
print(args)
if not isinstance(args.workers, int):
args.workers = min(16, mp.cpu_count())
# AMP
if args.amp:
mixed_precision.set_global_policy("mixed_float16")
# Load doctr model
model = recognition.__dict__[args.arch](
        pretrained=args.resume is None,
input_shape=(args.input_size, 4 * args.input_size, 3),
vocab=VOCABS[args.vocab],
)
# Resume weights
if isinstance(args.resume, str):
model.load_weights(args.resume)
st = time.time()
ds = datasets.__dict__[args.dataset](
train=True,
download=True,
recognition_task=True,
use_polygons=args.regular,
img_transforms=T.Resize((args.input_size, 4 * args.input_size), preserve_aspect_ratio=True),
)
_ds = datasets.__dict__[args.dataset](
train=False,
download=True,
recognition_task=True,
use_polygons=args.regular,
img_transforms=T.Resize((args.input_size, 4 * args.input_size), preserve_aspect_ratio=True),
)
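    # Merge the train and test splits to evaluate on the full dataset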
ds.data.extend([(np_img, target) for np_img, target in _ds.data])
test_loader = DataLoader(
ds,
batch_size=args.batch_size,
drop_last=False,
num_workers=args.workers,
shuffle=False,
)
print(f"Test set loaded in {time.time() - st:.4}s ({len(ds)} samples in " f"{len(test_loader)} batches)")
mean, std = model.cfg["mean"], model.cfg["std"]
batch_transforms = T.Normalize(mean=mean, std=std)
# Metrics
val_metric = TextMatch()
print("Running evaluation")
val_loss, exact_match, partial_match = evaluate(model, test_loader, batch_transforms, val_metric)
print(f"Validation loss: {val_loss:.6} (Exact: {exact_match:.2%} | Partial: {partial_match:.2%})")
def parse_args():
import argparse
parser = argparse.ArgumentParser(
description="docTR evaluation script for text recognition (TensorFlow)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("arch", type=str, help="text-recognition model to evaluate")
parser.add_argument("--vocab", type=str, default="french", help="Vocab to be used for evaluation")
parser.add_argument("--dataset", type=str, default="FUNSD", help="Dataset to evaluate on")
parser.add_argument("-b", "--batch_size", type=int, default=1, help="batch size for evaluation")
parser.add_argument("--input_size", type=int, default=32, help="input size H for the model, W = 4*H")
parser.add_argument("-j", "--workers", type=int, default=None, help="number of workers used for dataloading")
parser.add_argument(
"--only_regular", dest="regular", action="store_true", help="test set contains only regular text"
)
parser.add_argument("--resume", type=str, default=None, help="Checkpoint to resume")
parser.add_argument("--amp", dest="amp", help="Use Automatic Mixed Precision", action="store_true")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import math
import matplotlib.pyplot as plt
import numpy as np
def plot_samples(images, targets):
    num_samples = min(len(images), 12)
    num_cols = min(len(images), 4)
    num_rows = int(math.ceil(num_samples / num_cols))
    _, axes = plt.subplots(num_rows, num_cols, figsize=(20, 5))
    for idx in range(num_samples):
        # Unnormalize the image for display
        img = (255 * images[idx].numpy()).round().clip(0, 255).astype(np.uint8)
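        # Channels-first (PyTorch) tensors are transposed to HWC for matplotlib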
if img.shape[0] == 3 and img.shape[2] != 3:
img = img.transpose(1, 2, 0)
row_idx = idx // num_cols
col_idx = idx % num_cols
ax = axes[row_idx] if num_rows > 1 else axes
ax = ax[col_idx] if num_cols > 1 else ax
ax.imshow(img)
ax.set_title(targets[idx])
# Disable axis
for ax in axes.ravel():
ax.axis("off")
plt.show()
def plot_recorder(lr_recorder, loss_recorder, beta: float = 0.95, **kwargs) -> None:
"""Display the results of the LR grid search.
Adapted from https://github.com/frgfm/Holocron/blob/master/holocron/trainer/core.py.
Args:
lr_recorder: list of LR values
loss_recorder: list of loss values
beta (float, optional): smoothing factor
"""
    if len(lr_recorder) != len(loss_recorder) or len(lr_recorder) == 0:
        raise AssertionError("Both `lr_recorder` and `loss_recorder` should have the same, non-zero length")
# Exp moving average of loss
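    # The running average is zero-initialized, so it is biased towards 0 early on;
    # the (1 - beta ** (idx + 1)) divisor below is the standard bias correction (as in Adam)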
smoothed_losses = []
avg_loss = 0.0
for idx, loss in enumerate(loss_recorder):
avg_loss = beta * avg_loss + (1 - beta) * loss
smoothed_losses.append(avg_loss / (1 - beta ** (idx + 1)))
# Properly rescale Y-axis
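    # Trim the first few points (LR still warming up) and the last few (loss typically diverging)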
data_slice = slice(
min(len(loss_recorder) // 10, 10),
-min(len(loss_recorder) // 20, 5) if len(loss_recorder) >= 20 else len(loss_recorder),
)
vals = np.array(smoothed_losses[data_slice])
min_idx = vals.argmin()
max_val = vals.max() if min_idx is None else vals[: min_idx + 1].max() # type: ignore[misc]
delta = max_val - vals[min_idx]
plt.plot(lr_recorder[data_slice], smoothed_losses[data_slice])
plt.xscale("log")
plt.xlabel("Learning Rate")
plt.ylabel("Training loss")
plt.ylim(vals[min_idx] - 0.1 * delta, max_val + 0.2 * delta)
plt.grid(True, linestyle="--", axis="x")
plt.show(**kwargs)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
os.environ["USE_TORCH"] = "1"
import datetime
import hashlib
import logging
import multiprocessing as mp
import time
from pathlib import Path
import numpy as np
import torch
import wandb
from fastprogress.fastprogress import master_bar, progress_bar
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiplicativeLR, OneCycleLR
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torchvision.transforms import ColorJitter, Compose, Normalize
from doctr import transforms as T
from doctr.datasets import VOCABS, RecognitionDataset, WordGenerator
from doctr.models import login_to_hub, push_to_hf_hub, recognition
from doctr.utils.metrics import TextMatch
from utils import plot_recorder, plot_samples
def record_lr(
model: torch.nn.Module,
train_loader: DataLoader,
batch_transforms,
optimizer,
start_lr: float = 1e-7,
end_lr: float = 1,
num_it: int = 100,
amp: bool = False,
):
"""Gridsearch the optimal learning rate for the training.
Adapted from https://github.com/frgfm/Holocron/blob/master/holocron/trainer/core.py
"""
if num_it > len(train_loader):
raise ValueError("the value of `num_it` needs to be lower than the number of available batches")
model = model.train()
# Update param groups & LR
optimizer.defaults["lr"] = start_lr
for pgroup in optimizer.param_groups:
pgroup["lr"] = start_lr
gamma = (end_lr / start_lr) ** (1 / (num_it - 1))
scheduler = MultiplicativeLR(optimizer, lambda step: gamma)
lr_recorder = [start_lr * gamma**idx for idx in range(num_it)]
loss_recorder = []
if amp:
scaler = torch.cuda.amp.GradScaler()
for batch_idx, (images, targets) in enumerate(train_loader):
if torch.cuda.is_available():
images = images.cuda()
images = batch_transforms(images)
# Forward, Backward & update
optimizer.zero_grad()
if amp:
with torch.cuda.amp.autocast():
train_loss = model(images, targets)["loss"]
scaler.scale(train_loss).backward()
# Gradient clipping
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
# Update the params
scaler.step(optimizer)
scaler.update()
else:
train_loss = model(images, targets)["loss"]
train_loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
optimizer.step()
# Update LR
scheduler.step()
# Record
if not torch.isfinite(train_loss):
if batch_idx == 0:
raise ValueError("loss value is NaN or inf.")
else:
break
loss_recorder.append(train_loss.item())
# Stop after the number of iterations
if batch_idx + 1 == num_it:
break
return lr_recorder[: len(loss_recorder)], loss_recorder
def fit_one_epoch(model, train_loader, batch_transforms, optimizer, scheduler, mb, amp=False):
if amp:
scaler = torch.cuda.amp.GradScaler()
model.train()
# Iterate over the batches of the dataset
for images, targets in progress_bar(train_loader, parent=mb):
if torch.cuda.is_available():
images = images.cuda()
images = batch_transforms(images)
        optimizer.zero_grad()
if amp:
with torch.cuda.amp.autocast():
train_loss = model(images, targets)["loss"]
scaler.scale(train_loss).backward()
# Gradient clipping
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
# Update the params
scaler.step(optimizer)
scaler.update()
else:
train_loss = model(images, targets)["loss"]
train_loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
optimizer.step()
scheduler.step()
mb.child.comment = f"Training loss: {train_loss.item():.6}"
@torch.no_grad()
def evaluate(model, val_loader, batch_transforms, val_metric, amp=False):
# Model in eval mode
model.eval()
# Reset val metric
val_metric.reset()
# Validation loop
val_loss, batch_cnt = 0, 0
for images, targets in val_loader:
if torch.cuda.is_available():
images = images.cuda()
images = batch_transforms(images)
if amp:
with torch.cuda.amp.autocast():
out = model(images, targets, return_preds=True)
else:
out = model(images, targets, return_preds=True)
# Compute metric
if len(out["preds"]):
words, _ = zip(*out["preds"])
else:
words = []
val_metric.update(targets, words)
val_loss += out["loss"].item()
batch_cnt += 1
val_loss /= batch_cnt
result = val_metric.summary()
return val_loss, result["raw"], result["unicase"]
def main(args):
print(args)
if args.push_to_hub:
login_to_hub()
if not isinstance(args.workers, int):
args.workers = min(16, mp.cpu_count())
torch.backends.cudnn.benchmark = True
vocab = VOCABS[args.vocab]
fonts = args.font.split(",")
# Load val data generator
st = time.time()
if isinstance(args.val_path, str):
with open(os.path.join(args.val_path, "labels.json"), "rb") as f:
val_hash = hashlib.sha256(f.read()).hexdigest()
val_set = RecognitionDataset(
img_folder=os.path.join(args.val_path, "images"),
labels_path=os.path.join(args.val_path, "labels.json"),
img_transforms=T.Resize((args.input_size, 4 * args.input_size), preserve_aspect_ratio=True),
)
else:
val_hash = None
# Load synthetic data generator
val_set = WordGenerator(
vocab=vocab,
min_chars=args.min_chars,
max_chars=args.max_chars,
num_samples=args.val_samples * len(vocab),
font_family=fonts,
img_transforms=Compose(
[
T.Resize((args.input_size, 4 * args.input_size), preserve_aspect_ratio=True),
# Ensure we have a 90% split of white-background images
T.RandomApply(T.ColorInversion(), 0.9),
]
),
)
val_loader = DataLoader(
val_set,
batch_size=args.batch_size,
drop_last=False,
num_workers=args.workers,
sampler=SequentialSampler(val_set),
pin_memory=torch.cuda.is_available(),
collate_fn=val_set.collate_fn,
)
print(f"Validation set loaded in {time.time() - st:.4}s ({len(val_set)} samples in " f"{len(val_loader)} batches)")
batch_transforms = Normalize(mean=(0.694, 0.695, 0.693), std=(0.299, 0.296, 0.301))
# Load doctr model
model = recognition.__dict__[args.arch](pretrained=args.pretrained, vocab=vocab)
# Resume weights
if isinstance(args.resume, str):
print(f"Resuming {args.resume}")
checkpoint = torch.load(args.resume, map_location="cpu")
model.load_state_dict(checkpoint)
# GPU
if isinstance(args.device, int):
if not torch.cuda.is_available():
raise AssertionError("PyTorch cannot access your GPU. Please investigate!")
if args.device >= torch.cuda.device_count():
raise ValueError("Invalid device index")
# Silent default switch to GPU if available
elif torch.cuda.is_available():
args.device = 0
else:
logging.warning("No accessible GPU, targe device set to CPU.")
if torch.cuda.is_available():
torch.cuda.set_device(args.device)
model = model.cuda()
# Metrics
val_metric = TextMatch()
if args.test_only:
print("Running evaluation")
val_loss, exact_match, partial_match = evaluate(model, val_loader, batch_transforms, val_metric, amp=args.amp)
print(f"Validation loss: {val_loss:.6} (Exact: {exact_match:.2%} | Partial: {partial_match:.2%})")
return
st = time.time()
if isinstance(args.train_path, str):
# Load train data generator
base_path = Path(args.train_path)
parts = (
[base_path]
if base_path.joinpath("labels.json").is_file()
else [base_path.joinpath(sub) for sub in os.listdir(base_path)]
)
with open(parts[0].joinpath("labels.json"), "rb") as f:
train_hash = hashlib.sha256(f.read()).hexdigest()
train_set = RecognitionDataset(
parts[0].joinpath("images"),
parts[0].joinpath("labels.json"),
img_transforms=Compose(
[
T.Resize((args.input_size, 4 * args.input_size), preserve_aspect_ratio=True),
# Augmentations
T.RandomApply(T.ColorInversion(), 0.1),
ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.02),
]
),
)
if len(parts) > 1:
for subfolder in parts[1:]:
train_set.merge_dataset(
RecognitionDataset(subfolder.joinpath("images"), subfolder.joinpath("labels.json"))
)
else:
train_hash = None
# Load synthetic data generator
train_set = WordGenerator(
vocab=vocab,
min_chars=args.min_chars,
max_chars=args.max_chars,
num_samples=args.train_samples * len(vocab),
font_family=fonts,
img_transforms=Compose(
[
T.Resize((args.input_size, 4 * args.input_size), preserve_aspect_ratio=True),
# Ensure we have a 90% split of white-background images
T.RandomApply(T.ColorInversion(), 0.9),
ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.02),
]
),
)
train_loader = DataLoader(
train_set,
batch_size=args.batch_size,
drop_last=True,
num_workers=args.workers,
sampler=RandomSampler(train_set),
pin_memory=torch.cuda.is_available(),
collate_fn=train_set.collate_fn,
)
print(f"Train set loaded in {time.time() - st:.4}s ({len(train_set)} samples in " f"{len(train_loader)} batches)")
if args.show_samples:
x, target = next(iter(train_loader))
plot_samples(x, target)
return
# Optimizer
optimizer = torch.optim.Adam(
[p for p in model.parameters() if p.requires_grad],
args.lr,
betas=(0.95, 0.99),
eps=1e-6,
weight_decay=args.weight_decay,
)
# LR Finder
if args.find_lr:
lrs, losses = record_lr(model, train_loader, batch_transforms, optimizer, amp=args.amp)
plot_recorder(lrs, losses)
return
# Scheduler
if args.sched == "cosine":
scheduler = CosineAnnealingLR(optimizer, args.epochs * len(train_loader), eta_min=args.lr / 25e4)
elif args.sched == "onecycle":
scheduler = OneCycleLR(optimizer, args.lr, args.epochs * len(train_loader))
# Training monitoring
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
exp_name = f"{args.arch}_{current_time}" if args.name is None else args.name
# W&B
if args.wb:
run = wandb.init(
name=exp_name,
project="text-recognition",
config={
"learning_rate": args.lr,
"epochs": args.epochs,
"weight_decay": args.weight_decay,
"batch_size": args.batch_size,
"architecture": args.arch,
"input_size": args.input_size,
"optimizer": "adam",
"framework": "pytorch",
"scheduler": args.sched,
"vocab": args.vocab,
"train_hash": train_hash,
"val_hash": val_hash,
"pretrained": args.pretrained,
},
)
    # Track the best validation loss
min_loss = np.inf
# Training loop
mb = master_bar(range(args.epochs))
for epoch in mb:
fit_one_epoch(model, train_loader, batch_transforms, optimizer, scheduler, mb, amp=args.amp)
# Validation loop at the end of each epoch
val_loss, exact_match, partial_match = evaluate(model, val_loader, batch_transforms, val_metric, amp=args.amp)
if val_loss < min_loss:
print(f"Validation loss decreased {min_loss:.6} --> {val_loss:.6}: saving state...")
torch.save(model.state_dict(), f"./{exp_name}.pt")
min_loss = val_loss
mb.write(
f"Epoch {epoch + 1}/{args.epochs} - Validation loss: {val_loss:.6} "
f"(Exact: {exact_match:.2%} | Partial: {partial_match:.2%})"
)
# W&B
if args.wb:
wandb.log(
{
"val_loss": val_loss,
"exact_match": exact_match,
"partial_match": partial_match,
}
)
if args.wb:
run.finish()
if args.push_to_hub:
push_to_hf_hub(model, exp_name, task="recognition", run_config=args)
def parse_args():
import argparse
parser = argparse.ArgumentParser(
description="DocTR training script for text recognition (PyTorch)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("arch", type=str, help="text-recognition model to train")
parser.add_argument("--train_path", type=str, default=None, help="path to train data folder(s)")
parser.add_argument("--val_path", type=str, default=None, help="path to val data folder")
parser.add_argument(
"--train-samples",
type=int,
default=1000,
help="Multiplied by the vocab length gets you the number of synthetic training samples that will be used.",
)
parser.add_argument(
"--val-samples",
type=int,
default=20,
help="Multiplied by the vocab length gets you the number of synthetic validation samples that will be used.",
)
parser.add_argument(
"--font", type=str, default="FreeMono.ttf,FreeSans.ttf,FreeSerif.ttf", help="Font family to be used"
)
parser.add_argument("--min-chars", type=int, default=1, help="Minimum number of characters per synthetic sample")
parser.add_argument("--max-chars", type=int, default=12, help="Maximum number of characters per synthetic sample")
parser.add_argument("--name", type=str, default=None, help="Name of your training experiment")
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train the model on")
parser.add_argument("-b", "--batch_size", type=int, default=64, help="batch size for training")
parser.add_argument("--device", default=None, type=int, help="device")
parser.add_argument("--input_size", type=int, default=32, help="input size H for the model, W = 4*H")
parser.add_argument("--lr", type=float, default=0.001, help="learning rate for the optimizer (Adam)")
parser.add_argument("--wd", "--weight-decay", default=0, type=float, help="weight decay", dest="weight_decay")
parser.add_argument("-j", "--workers", type=int, default=None, help="number of workers used for dataloading")
parser.add_argument("--resume", type=str, default=None, help="Path to your checkpoint")
parser.add_argument("--vocab", type=str, default="french", help="Vocab to be used for training")
parser.add_argument("--test-only", dest="test_only", action="store_true", help="Run the validation loop")
parser.add_argument(
"--show-samples", dest="show_samples", action="store_true", help="Display unormalized training samples"
)
parser.add_argument("--wb", dest="wb", action="store_true", help="Log to Weights & Biases")
parser.add_argument("--push-to-hub", dest="push_to_hub", action="store_true", help="Push to Huggingface Hub")
parser.add_argument(
"--pretrained",
dest="pretrained",
action="store_true",
help="Load pretrained parameters before starting the training",
)
parser.add_argument("--sched", type=str, default="cosine", help="scheduler to use")
parser.add_argument("--amp", dest="amp", help="Use Automatic Mixed Precision", action="store_true")
parser.add_argument("--find-lr", action="store_true", help="Gridsearch the optimal LR")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
os.environ["USE_TORCH"] = "1"
import datetime
import hashlib
import multiprocessing
import time
from pathlib import Path
import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import wandb
from fastprogress.fastprogress import master_bar, progress_bar
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.optim.lr_scheduler import CosineAnnealingLR, OneCycleLR
from torch.utils.data import DataLoader, SequentialSampler
from torch.utils.data.distributed import DistributedSampler
from torchvision.transforms import ColorJitter, Compose, Normalize
from doctr import transforms as T
from doctr.datasets import VOCABS, RecognitionDataset, WordGenerator
from doctr.models import login_to_hub, push_to_hf_hub, recognition
from doctr.utils.metrics import TextMatch
from utils import plot_samples
def fit_one_epoch(model, device, train_loader, batch_transforms, optimizer, scheduler, mb, amp=False):
if amp:
scaler = torch.cuda.amp.GradScaler()
model.train()
# Iterate over the batches of the dataset
for images, targets in progress_bar(train_loader, parent=mb):
images = images.to(device)
images = batch_transforms(images)
        optimizer.zero_grad()
if amp:
with torch.cuda.amp.autocast():
train_loss = model(images, targets)["loss"]
scaler.scale(train_loss).backward()
# Gradient clipping
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
# Update the params
scaler.step(optimizer)
scaler.update()
else:
train_loss = model(images, targets)["loss"]
train_loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
optimizer.step()
scheduler.step()
mb.child.comment = f"Training loss: {train_loss.item():.6}"
@torch.no_grad()
def evaluate(model, device, val_loader, batch_transforms, val_metric, amp=False):
# Model in eval mode
model.eval()
# Reset val metric
val_metric.reset()
# Validation loop
val_loss, batch_cnt = 0, 0
for images, targets in val_loader:
images = images.to(device)
images = batch_transforms(images)
if amp:
with torch.cuda.amp.autocast():
out = model(images, targets, return_preds=True)
else:
out = model(images, targets, return_preds=True)
# Compute metric
if len(out["preds"]):
words, _ = zip(*out["preds"])
else:
words = []
val_metric.update(targets, words)
val_loss += out["loss"].item()
batch_cnt += 1
val_loss /= batch_cnt
result = val_metric.summary()
return val_loss, result["raw"], result["unicase"]
def main(rank: int, world_size: int, args):
"""
Args:
rank (int): device id to put the model on
args: other arguments passed through the CLI
"""
print(args)
if rank == 0 and args.push_to_hub:
login_to_hub()
if not isinstance(args.workers, int):
args.workers = min(16, multiprocessing.cpu_count())
torch.backends.cudnn.benchmark = True
vocab = VOCABS[args.vocab]
fonts = args.font.split(",")
if rank == 0:
# Load val data generator
st = time.time()
if isinstance(args.val_path, str):
with open(os.path.join(args.val_path, "labels.json"), "rb") as f:
val_hash = hashlib.sha256(f.read()).hexdigest()
val_set = RecognitionDataset(
img_folder=os.path.join(args.val_path, "images"),
labels_path=os.path.join(args.val_path, "labels.json"),
img_transforms=T.Resize((args.input_size, 4 * args.input_size), preserve_aspect_ratio=True),
)
else:
val_hash = None
# Load synthetic data generator
val_set = WordGenerator(
vocab=vocab,
min_chars=args.min_chars,
max_chars=args.max_chars,
num_samples=args.val_samples * len(vocab),
font_family=fonts,
img_transforms=Compose(
[
T.Resize((args.input_size, 4 * args.input_size), preserve_aspect_ratio=True),
# Ensure we have a 90% split of white-background images
T.RandomApply(T.ColorInversion(), 0.9),
]
),
)
val_loader = DataLoader(
val_set,
batch_size=args.batch_size,
drop_last=False,
num_workers=args.workers,
sampler=SequentialSampler(val_set),
pin_memory=torch.cuda.is_available(),
collate_fn=val_set.collate_fn,
)
print(
f"Validation set loaded in {time.time() - st:.4}s ({len(val_set)} samples in " f"{len(val_loader)} batches)"
)
batch_transforms = Normalize(mean=(0.694, 0.695, 0.693), std=(0.299, 0.296, 0.301))
# Load doctr model
model = recognition.__dict__[args.arch](pretrained=args.pretrained, vocab=vocab)
# Resume weights
if isinstance(args.resume, str):
print(f"Resuming {args.resume}")
checkpoint = torch.load(args.resume, map_location="cpu")
model.load_state_dict(checkpoint)
# create default process group
device = torch.device("cuda", args.devices[rank])
dist.init_process_group(args.backend, rank=rank, world_size=world_size)
# create local model
model = model.to(device)
# construct DDP model
model = DDP(model, device_ids=[device])
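    # DDP averages gradients across processes during the backward pass, keeping every replica in sync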
if rank == 0:
# Metrics
val_metric = TextMatch()
if rank == 0 and args.test_only:
print("Running evaluation")
val_loss, exact_match, partial_match = evaluate(
model, device, val_loader, batch_transforms, val_metric, amp=args.amp
)
print(f"Validation loss: {val_loss:.6} (Exact: {exact_match:.2%} | Partial: {partial_match:.2%})")
return
st = time.time()
if isinstance(args.train_path, str):
# Load train data generator
base_path = Path(args.train_path)
parts = (
[base_path]
if base_path.joinpath("labels.json").is_file()
else [base_path.joinpath(sub) for sub in os.listdir(base_path)]
)
with open(parts[0].joinpath("labels.json"), "rb") as f:
train_hash = hashlib.sha256(f.read()).hexdigest()
train_set = RecognitionDataset(
parts[0].joinpath("images"),
parts[0].joinpath("labels.json"),
img_transforms=Compose(
[
T.Resize((args.input_size, 4 * args.input_size), preserve_aspect_ratio=True),
# Augmentations
T.RandomApply(T.ColorInversion(), 0.1),
ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.02),
]
),
)
if len(parts) > 1:
for subfolder in parts[1:]:
train_set.merge_dataset(
RecognitionDataset(subfolder.joinpath("images"), subfolder.joinpath("labels.json"))
)
else:
train_hash = None
# Load synthetic data generator
train_set = WordGenerator(
vocab=vocab,
min_chars=args.min_chars,
max_chars=args.max_chars,
num_samples=args.train_samples * len(vocab),
font_family=fonts,
img_transforms=Compose(
[
T.Resize((args.input_size, 4 * args.input_size), preserve_aspect_ratio=True),
# Ensure we have a 90% split of white-background images
T.RandomApply(T.ColorInversion(), 0.9),
ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.02),
]
),
)
train_loader = DataLoader(
train_set,
batch_size=args.batch_size,
drop_last=True,
num_workers=args.workers,
sampler=DistributedSampler(train_set, num_replicas=world_size, rank=rank, shuffle=True, drop_last=True),
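        # NB: sampler.set_epoch(epoch) is not called in the training loop, so the shard shuffling repeats across epochs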
pin_memory=torch.cuda.is_available(),
collate_fn=train_set.collate_fn,
)
print(f"Train set loaded in {time.time() - st:.4}s ({len(train_set)} samples in " f"{len(train_loader)} batches)")
if rank == 0 and args.show_samples:
x, target = next(iter(train_loader))
plot_samples(x, target)
return
# Optimizer
optimizer = torch.optim.Adam(
[p for p in model.parameters() if p.requires_grad],
args.lr,
betas=(0.95, 0.99),
eps=1e-6,
weight_decay=args.weight_decay,
)
# Scheduler
if args.sched == "cosine":
scheduler = CosineAnnealingLR(optimizer, args.epochs * len(train_loader), eta_min=args.lr / 25e4)
elif args.sched == "onecycle":
scheduler = OneCycleLR(optimizer, args.lr, args.epochs * len(train_loader))
# Training monitoring
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
exp_name = f"{args.arch}_{current_time}" if args.name is None else args.name
# W&B
if rank == 0 and args.wb:
run = wandb.init(
name=exp_name,
project="text-recognition",
config={
"learning_rate": args.lr,
"epochs": args.epochs,
"weight_decay": args.weight_decay,
"batch_size": args.batch_size,
"architecture": args.arch,
"input_size": args.input_size,
"optimizer": "adam",
"framework": "pytorch",
"scheduler": args.sched,
"vocab": args.vocab,
"train_hash": train_hash,
"val_hash": val_hash,
"pretrained": args.pretrained,
},
)
    # Track the best validation loss
min_loss = np.inf
# Training loop
mb = master_bar(range(args.epochs))
for epoch in mb:
fit_one_epoch(model, device, train_loader, batch_transforms, optimizer, scheduler, mb, amp=args.amp)
if rank == 0:
# Validation loop at the end of each epoch
val_loss, exact_match, partial_match = evaluate(
model, device, val_loader, batch_transforms, val_metric, amp=args.amp
)
if val_loss < min_loss:
# All processes should see same parameters as they all start from same
# random parameters and gradients are synchronized in backward passes.
# Therefore, saving it in one process is sufficient.
print(f"Validation loss decreased {min_loss:.6} --> {val_loss:.6}: saving state...")
torch.save(model.state_dict(), f"./{exp_name}.pt")
min_loss = val_loss
mb.write(
f"Epoch {epoch + 1}/{args.epochs} - Validation loss: {val_loss:.6} "
f"(Exact: {exact_match:.2%} | Partial: {partial_match:.2%})"
)
# W&B
if args.wb:
wandb.log(
{
"val_loss": val_loss,
"exact_match": exact_match,
"partial_match": partial_match,
}
)
if rank == 0:
if args.wb:
run.finish()
if args.push_to_hub:
push_to_hf_hub(model, exp_name, task="recognition", run_config=args)
def parse_args():
import argparse
parser = argparse.ArgumentParser(
description="DocTR training script for text recognition (PyTorch)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("arch", type=str, help="text-recognition model to train")
parser.add_argument("--train_path", type=str, default=None, help="path to train data folder(s)")
parser.add_argument("--val_path", type=str, default=None, help="path to val data folder")
parser.add_argument(
"--train-samples",
type=int,
default=1000,
help="Multiplied by the vocab length gets you the number of synthetic training samples that will be used.",
)
parser.add_argument(
"--val-samples",
type=int,
default=20,
help="Multiplied by the vocab length gets you the number of synthetic validation samples that will be used.",
)
parser.add_argument(
"--font", type=str, default="FreeMono.ttf,FreeSans.ttf,FreeSerif.ttf", help="Font family to be used"
)
parser.add_argument("--min-chars", type=int, default=1, help="Minimum number of characters per synthetic sample")
parser.add_argument("--max-chars", type=int, default=12, help="Maximum number of characters per synthetic sample")
parser.add_argument("--name", type=str, default=None, help="Name of your training experiment")
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train the model on")
parser.add_argument("-b", "--batch_size", type=int, default=64, help="batch size for training")
parser.add_argument("--backend", default="nccl", type=str, help="Backend to use for Torch DDP")
parser.add_argument("--devices", default=None, nargs="+", type=int, help="GPU devices to use for training")
parser.add_argument("--input_size", type=int, default=32, help="input size H for the model, W = 4*H")
parser.add_argument("--lr", type=float, default=0.001, help="learning rate for the optimizer (Adam)")
parser.add_argument("--wd", "--weight-decay", default=0, type=float, help="weight decay", dest="weight_decay")
parser.add_argument("-j", "--workers", type=int, default=None, help="number of workers used for dataloading")
parser.add_argument("--resume", type=str, default=None, help="Path to your checkpoint")
parser.add_argument("--vocab", type=str, default="french", help="Vocab to be used for training")
parser.add_argument("--test-only", dest="test_only", action="store_true", help="Run the validation loop")
parser.add_argument(
"--show-samples", dest="show_samples", action="store_true", help="Display unormalized training samples"
)
parser.add_argument("--wb", dest="wb", action="store_true", help="Log to Weights & Biases")
parser.add_argument("--push-to-hub", dest="push_to_hub", action="store_true", help="Push to Huggingface Hub")
parser.add_argument(
"--pretrained",
dest="pretrained",
action="store_true",
help="Load pretrained parameters before starting the training",
)
parser.add_argument("--sched", type=str, default="cosine", help="scheduler to use")
parser.add_argument("--amp", dest="amp", help="Use Automatic Mixed Precision", action="store_true")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
if not torch.cuda.is_available():
raise AssertionError("PyTorch cannot access your GPUs. Please investigate!")
if not isinstance(args.devices, list):
args.devices = list(range(torch.cuda.device_count()))
nprocs = len(args.devices)
# Environment variables which need to be
# set when using c10d's default "env"
# initialization mode.
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "29500"
mp.spawn(main, args=(nprocs, args), nprocs=nprocs, join=True)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
"""
Text detection latency benchmark
"""
import argparse
import os
import time
# Environment variables must be set before importing TensorFlow to take effect
os.environ["USE_TF"] = "1"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import numpy as np
import tensorflow as tf
from doctr.models import detection
def main(args):
if args.gpu:
gpu_devices = tf.config.experimental.list_physical_devices("GPU")
if any(gpu_devices):
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
else:
raise AssertionError("TensorFlow cannot access your GPU. Please investigate!")
else:
os.environ["CUDA_VISIBLE_DEVICES"] = ""
# Pretrained imagenet model
model = detection.__dict__[args.arch](
pretrained=args.pretrained,
pretrained_backbone=False,
input_shape=(args.size, args.size, 3),
)
# Input
img_tensor = tf.random.uniform(shape=[1, args.size, args.size, 3], maxval=1, dtype=tf.float32)
# Warmup
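    # (first calls include one-off graph tracing / kernel setup costs, so they are excluded from the timings)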
for _ in range(10):
_ = model(img_tensor, training=False)
timings = []
# Evaluation runs
for _ in range(args.it):
start_ts = time.perf_counter()
_ = model(img_tensor, training=False)
timings.append(time.perf_counter() - start_ts)
_timings = np.array(timings)
print(f"{args.arch} ({args.it} runs on ({args.size}, {args.size}) inputs)")
print(f"mean {1000 * _timings.mean():.2f}ms, std {1000 * _timings.std():.2f}ms")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="docTR latency benchmark for text detection (TensorFlow)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("arch", type=str, help="Architecture to use")
parser.add_argument("--size", type=int, default=1024, help="The image input size")
parser.add_argument("--gpu", dest="gpu", help="Should the benchmark be performed on GPU", action="store_true")
parser.add_argument("--it", type=int, default=100, help="Number of iterations to run")
parser.add_argument(
"--pretrained", dest="pretrained", help="Use pre-trained models from the modelzoo", action="store_true"
)
args = parser.parse_args()
main(args)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
"""
Text detection latency benchmark
"""
import argparse
import os
import time
import numpy as np
import torch
os.environ["USE_TORCH"] = "1"
from doctr.models import detection
@torch.no_grad()
def main(args):
device = torch.device("cuda:0" if args.gpu else "cpu")
# Pretrained imagenet model
model = (
detection.__dict__[args.arch](pretrained=args.pretrained, pretrained_backbone=False).eval().to(device=device)
)
# Input
img_tensor = torch.rand((1, 3, args.size, args.size)).to(device=device)
# Warmup
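    # (first calls include one-off kernel selection / cache setup costs, so they are excluded from the timings)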
for _ in range(10):
_ = model(img_tensor)
timings = []
# Evaluation runs
for _ in range(args.it):
start_ts = time.perf_counter()
_ = model(img_tensor)
timings.append(time.perf_counter() - start_ts)
_timings = np.array(timings)
print(f"{args.arch} ({args.it} runs on ({args.size}, {args.size}) inputs)")
print(f"mean {1000 * _timings.mean():.2f}ms, std {1000 * _timings.std():.2f}ms")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="docTR latency benchmark for text detection (PyTorch)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("arch", type=str, help="Architecture to use")
parser.add_argument("--size", type=int, default=1024, help="The image input size")
parser.add_argument("--gpu", dest="gpu", help="Should the benchmark be performed on GPU", action="store_true")
parser.add_argument("--it", type=int, default=100, help="Number of iterations to run")
parser.add_argument(
"--pretrained", dest="pretrained", help="Use pre-trained models from the modelzoo", action="store_true"
)
args = parser.parse_args()
main(args)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
os.environ["USE_TF"] = "1"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import datetime
import hashlib
import multiprocessing as mp
import time
import numpy as np
import psutil
import tensorflow as tf
import wandb
from fastprogress.fastprogress import master_bar, progress_bar
from tensorflow.keras import mixed_precision
from doctr.models import login_to_hub, push_to_hf_hub
gpu_devices = tf.config.experimental.list_physical_devices("GPU")
if any(gpu_devices):
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
from doctr import transforms as T
from doctr.datasets import DataLoader, DetectionDataset
from doctr.models import detection
from doctr.utils.metrics import LocalizationConfusion
from utils import load_backbone, plot_recorder, plot_samples
def record_lr(
model: tf.keras.Model,
train_loader: DataLoader,
batch_transforms,
optimizer,
start_lr: float = 1e-7,
end_lr: float = 1,
num_it: int = 100,
amp: bool = False,
):
"""Gridsearch the optimal learning rate for the training.
Adapted from https://github.com/frgfm/Holocron/blob/master/holocron/trainer/core.py
"""
if num_it > len(train_loader):
raise ValueError("the value of `num_it` needs to be lower than the number of available batches")
# Update param groups & LR
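    # Exponential sweep: multiplying the LR by a constant gamma each step
    # moves it from start_lr to end_lr in exactly num_it iterations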
gamma = (end_lr / start_lr) ** (1 / (num_it - 1))
optimizer.learning_rate = start_lr
lr_recorder = [start_lr * gamma**idx for idx in range(num_it)]
loss_recorder = []
for batch_idx, (images, targets) in enumerate(train_loader):
images = batch_transforms(images)
# Forward, Backward & update
with tf.GradientTape() as tape:
train_loss = model(images, targets, training=True)["loss"]
grads = tape.gradient(train_loss, model.trainable_weights)
if amp:
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
optimizer.learning_rate = optimizer.learning_rate * gamma
# Record
train_loss = train_loss.numpy()
        if not np.all(np.isfinite(train_loss)):
if batch_idx == 0:
raise ValueError("loss value is NaN or inf.")
else:
break
loss_recorder.append(train_loss.mean())
# Stop after the number of iterations
if batch_idx + 1 == num_it:
break
return lr_recorder[: len(loss_recorder)], loss_recorder
def fit_one_epoch(model, train_loader, batch_transforms, optimizer, mb, amp=False):
train_iter = iter(train_loader)
# Iterate over the batches of the dataset
for images, targets in progress_bar(train_iter, parent=mb):
images = batch_transforms(images)
with tf.GradientTape() as tape:
train_loss = model(images, targets, training=True)["loss"]
grads = tape.gradient(train_loss, model.trainable_weights)
if amp:
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
mb.child.comment = f"Training loss: {train_loss.numpy():.6}"
def evaluate(model, val_loader, batch_transforms, val_metric):
# Reset val metric
val_metric.reset()
# Validation loop
val_loss, batch_cnt = 0, 0
val_iter = iter(val_loader)
for images, targets in val_iter:
images = batch_transforms(images)
out = model(images, targets, training=False, return_preds=True)
# Compute metric
loc_preds = out["preds"]
for target, loc_pred in zip(targets, loc_preds):
for boxes_gt, boxes_pred in zip(target.values(), loc_pred.values()):
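            # NB: `args` is read from module scope (set under __main__), not passed to this function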
if args.rotation and args.eval_straight:
# Convert pred to boxes [xmin, ymin, xmax, ymax] N, 4, 2 --> N, 4
boxes_pred = np.concatenate((boxes_pred.min(axis=1), boxes_pred.max(axis=1)), axis=-1)
val_metric.update(gts=boxes_gt, preds=boxes_pred[:, :4])
val_loss += out["loss"].numpy()
batch_cnt += 1
val_loss /= batch_cnt
recall, precision, mean_iou = val_metric.summary()
return val_loss, recall, precision, mean_iou
def main(args):
print(args)
if args.push_to_hub:
login_to_hub()
if not isinstance(args.workers, int):
args.workers = min(16, mp.cpu_count())
system_available_memory = int(psutil.virtual_memory().available / 1024**3)
# AMP
if args.amp:
mixed_precision.set_global_policy("mixed_float16")
st = time.time()
val_set = DetectionDataset(
img_folder=os.path.join(args.val_path, "images"),
label_path=os.path.join(args.val_path, "labels.json"),
sample_transforms=T.SampleCompose(
(
[T.Resize((args.input_size, args.input_size), preserve_aspect_ratio=True, symmetric_pad=True)]
if not args.rotation or args.eval_straight
else []
)
+ (
[
T.Resize(args.input_size, preserve_aspect_ratio=True), # This does not pad
T.RandomRotate(90, expand=True),
T.Resize((args.input_size, args.input_size), preserve_aspect_ratio=True, symmetric_pad=True),
]
if args.rotation and not args.eval_straight
else []
)
),
use_polygons=args.rotation and not args.eval_straight,
)
val_loader = DataLoader(
val_set,
batch_size=args.batch_size,
shuffle=False,
drop_last=False,
num_workers=args.workers,
)
print(
f"Validation set loaded in {time.time() - st:.4}s ({len(val_set)} samples in "
f"{val_loader.num_batches} batches)"
)
with open(os.path.join(args.val_path, "labels.json"), "rb") as f:
val_hash = hashlib.sha256(f.read()).hexdigest()
batch_transforms = T.Compose(
[
T.Normalize(mean=(0.798, 0.785, 0.772), std=(0.264, 0.2749, 0.287)),
]
)
# Load doctr model
model = detection.__dict__[args.arch](
pretrained=args.pretrained,
input_shape=(args.input_size, args.input_size, 3),
assume_straight_pages=not args.rotation,
class_names=val_set.class_names,
)
# Resume weights
if isinstance(args.resume, str):
model.load_weights(args.resume)
if isinstance(args.pretrained_backbone, str):
print("Loading backbone weights.")
model = load_backbone(model, args.pretrained_backbone)
print("Done.")
# Metrics
val_metric = LocalizationConfusion(
use_polygons=args.rotation and not args.eval_straight,
mask_shape=(args.input_size, args.input_size),
        # Broadcasting speeds up the IoU computation but is memory-hungry
        use_broadcasting=system_available_memory > 62,
)
if args.test_only:
print("Running evaluation")
val_loss, recall, precision, mean_iou = evaluate(model, val_loader, batch_transforms, val_metric)
print(
f"Validation loss: {val_loss:.6} (Recall: {recall:.2%} | Precision: {precision:.2%} | "
f"Mean IoU: {mean_iou:.2%})"
)
return
st = time.time()
# Load both train and val data generators
train_set = DetectionDataset(
img_folder=os.path.join(args.train_path, "images"),
label_path=os.path.join(args.train_path, "labels.json"),
img_transforms=T.Compose(
[
# Augmentations
T.RandomApply(T.ColorInversion(), 0.1),
T.RandomJpegQuality(60),
T.RandomSaturation(0.3),
T.RandomContrast(0.3),
T.RandomBrightness(0.3),
]
),
sample_transforms=T.SampleCompose(
(
[T.Resize((args.input_size, args.input_size), preserve_aspect_ratio=True, symmetric_pad=True)]
if not args.rotation
else []
)
+ (
[
T.Resize(args.input_size, preserve_aspect_ratio=True), # This does not pad
T.RandomRotate(90, expand=True),
T.Resize((args.input_size, args.input_size), preserve_aspect_ratio=True, symmetric_pad=True),
]
if args.rotation
else []
)
),
use_polygons=args.rotation,
)
train_loader = DataLoader(
train_set,
batch_size=args.batch_size,
shuffle=True,
drop_last=True,
num_workers=args.workers,
)
print(
f"Train set loaded in {time.time() - st:.4}s ({len(train_set)} samples in "
f"{train_loader.num_batches} batches)"
)
with open(os.path.join(args.train_path, "labels.json"), "rb") as f:
train_hash = hashlib.sha256(f.read()).hexdigest()
if args.show_samples:
x, target = next(iter(train_loader))
plot_samples(x, target)
return
# Optimizer
scheduler = tf.keras.optimizers.schedules.ExponentialDecay(
args.lr,
decay_steps=args.epochs * len(train_loader),
decay_rate=1 / (25e4), # final lr as a fraction of initial lr
staircase=False,
)
optimizer = tf.keras.optimizers.Adam(learning_rate=scheduler, beta_1=0.95, beta_2=0.99, epsilon=1e-6, clipnorm=5)
if args.amp:
optimizer = mixed_precision.LossScaleOptimizer(optimizer)
# LR Finder
if args.find_lr:
lrs, losses = record_lr(model, train_loader, batch_transforms, optimizer, amp=args.amp)
plot_recorder(lrs, losses)
return
    # Training monitoring
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
exp_name = f"{args.arch}_{current_time}" if args.name is None else args.name
# W&B
if args.wb:
run = wandb.init(
name=exp_name,
project="text-detection",
config={
"learning_rate": args.lr,
"epochs": args.epochs,
"weight_decay": 0.0,
"batch_size": args.batch_size,
"architecture": args.arch,
"input_size": args.input_size,
"optimizer": "adam",
"framework": "tensorflow",
"scheduler": "exp_decay",
"train_hash": train_hash,
"val_hash": val_hash,
"pretrained": args.pretrained,
"rotation": args.rotation,
},
)
if args.freeze_backbone:
for layer in model.feat_extractor.layers:
layer.trainable = False
min_loss = np.inf
# Training loop
mb = master_bar(range(args.epochs))
for epoch in mb:
fit_one_epoch(model, train_loader, batch_transforms, optimizer, mb, args.amp)
# Validation loop at the end of each epoch
val_loss, recall, precision, mean_iou = evaluate(model, val_loader, batch_transforms, val_metric)
if val_loss < min_loss:
print(f"Validation loss decreased {min_loss:.6} --> {val_loss:.6}: saving state...")
model.save_weights(f"./{exp_name}/weights")
min_loss = val_loss
log_msg = f"Epoch {epoch + 1}/{args.epochs} - Validation loss: {val_loss:.6} "
if any(val is None for val in (recall, precision, mean_iou)):
log_msg += "(Undefined metric value, caused by empty GTs or predictions)"
else:
log_msg += f"(Recall: {recall:.2%} | Precision: {precision:.2%} | Mean IoU: {mean_iou:.2%})"
mb.write(log_msg)
# W&B
if args.wb:
wandb.log(
{
"val_loss": val_loss,
"recall": recall,
"precision": precision,
"mean_iou": mean_iou,
}
)
if args.wb:
run.finish()
if args.push_to_hub:
push_to_hf_hub(model, exp_name, task="detection", run_config=args)
def parse_args():
import argparse
parser = argparse.ArgumentParser(
description="DocTR training script for text detection (TensorFlow)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("train_path", type=str, help="path to training data folder")
parser.add_argument("val_path", type=str, help="path to validation data folder")
parser.add_argument("arch", type=str, help="text-detection model to train")
parser.add_argument("--name", type=str, default=None, help="Name of your training experiment")
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train the model on")
parser.add_argument("-b", "--batch_size", type=int, default=2, help="batch size for training")
parser.add_argument("--input_size", type=int, default=1024, help="model input size, H = W")
parser.add_argument("--lr", type=float, default=0.001, help="learning rate for the optimizer (Adam)")
parser.add_argument("-j", "--workers", type=int, default=None, help="number of workers used for dataloading")
parser.add_argument("--resume", type=str, default=None, help="Path to your checkpoint")
parser.add_argument("--pretrained-backbone", type=str, default=None, help="Path to your backbone weights")
parser.add_argument("--test-only", dest="test_only", action="store_true", help="Run the validation loop")
parser.add_argument(
"--freeze-backbone", dest="freeze_backbone", action="store_true", help="freeze model backbone for fine-tuning"
)
parser.add_argument(
"--show-samples", dest="show_samples", action="store_true", help="Display unormalized training samples"
)
parser.add_argument("--wb", dest="wb", action="store_true", help="Log to Weights & Biases")
parser.add_argument("--push-to-hub", dest="push_to_hub", action="store_true", help="Push to Huggingface Hub")
parser.add_argument(
"--pretrained",
dest="pretrained",
action="store_true",
help="Load pretrained parameters before starting the training",
)
parser.add_argument("--rotation", dest="rotation", action="store_true", help="train with rotated documents")
parser.add_argument(
"--eval-straight",
action="store_true",
help="metrics evaluation with straight boxes instead of polygons to save time + memory",
)
parser.add_argument("--amp", dest="amp", help="Use Automatic Mixed Precision", action="store_true")
parser.add_argument("--find-lr", action="store_true", help="Gridsearch the optimal LR")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
from doctr.file_utils import CLASS_NAME
os.environ["USE_TORCH"] = "1"
import logging
import multiprocessing as mp
import time
from pathlib import Path
import psutil
import torch
from torch.utils.data import DataLoader, SequentialSampler
from torchvision.transforms import Normalize
from tqdm import tqdm
from doctr import datasets
from doctr import transforms as T
from doctr.models import detection
from doctr.utils.metrics import LocalizationConfusion
@torch.no_grad()
def evaluate(model, val_loader, batch_transforms, val_metric, amp=False):
# Model in eval mode
model.eval()
# Reset val metric
val_metric.reset()
# Validation loop
val_loss, batch_cnt = 0, 0
for images, targets in tqdm(val_loader):
if torch.cuda.is_available():
images = images.cuda()
images = batch_transforms(images)
targets = [{CLASS_NAME: t["boxes"]} for t in targets]
if amp:
with torch.cuda.amp.autocast():
out = model(images, targets, return_preds=True)
else:
out = model(images, targets, return_preds=True)
# Compute metric
loc_preds = out["preds"]
for target, loc_pred in zip(targets, loc_preds):
for boxes_gt, boxes_pred in zip(target.values(), loc_pred.values()):
# Remove scores
val_metric.update(gts=boxes_gt, preds=boxes_pred[:, :-1])
val_loss += out["loss"].item()
batch_cnt += 1
val_loss /= batch_cnt
recall, precision, mean_iou = val_metric.summary()
return val_loss, recall, precision, mean_iou
def main(args):
print(args)
if not isinstance(args.workers, int):
args.workers = min(16, mp.cpu_count())
torch.backends.cudnn.benchmark = True
system_available_memory = int(psutil.virtual_memory().available / 1024**3)
# Load docTR model
model = detection.__dict__[args.arch](
pretrained=not isinstance(args.resume, str), assume_straight_pages=not args.rotation
).eval()
if isinstance(args.size, int):
input_shape = (args.size, args.size)
else:
input_shape = model.cfg["input_shape"][-2:]
mean, std = model.cfg["mean"], model.cfg["std"]
st = time.time()
ds = datasets.__dict__[args.dataset](
train=True,
download=True,
use_polygons=args.rotation,
sample_transforms=T.Resize(input_shape),
)
# Monkeypatch
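    # The dataset class downloads the train and test splits into sibling folders;
    # re-root the train split here and append the test split below so evaluation
    # runs over the full corpus.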
subfolder = ds.root.split("/")[-2:]
ds.root = str(Path(ds.root).parent.parent)
ds.data = [(os.path.join(*subfolder, name), target) for name, target in ds.data]
_ds = datasets.__dict__[args.dataset](
train=False,
download=True,
use_polygons=args.rotation,
sample_transforms=T.Resize(input_shape),
)
subfolder = _ds.root.split("/")[-2:]
ds.data.extend([(os.path.join(*subfolder, name), target) for name, target in _ds.data])
test_loader = DataLoader(
ds,
batch_size=args.batch_size,
drop_last=False,
num_workers=args.workers,
sampler=SequentialSampler(ds),
pin_memory=torch.cuda.is_available(),
collate_fn=ds.collate_fn,
)
print(f"Test set loaded in {time.time() - st:.4}s ({len(ds)} samples in " f"{len(test_loader)} batches)")
batch_transforms = Normalize(mean=mean, std=std)
# Resume weights
if isinstance(args.resume, str):
print(f"Resuming {args.resume}")
checkpoint = torch.load(args.resume, map_location="cpu")
model.load_state_dict(checkpoint)
# GPU
if isinstance(args.device, int):
if not torch.cuda.is_available():
raise AssertionError("PyTorch cannot access your GPU. Please investigate!")
if args.device >= torch.cuda.device_count():
raise ValueError("Invalid device index")
# Silent default switch to GPU if available
elif torch.cuda.is_available():
args.device = 0
else:
logging.warning("No accessible GPU, targe device set to CPU.")
if torch.cuda.is_available():
torch.cuda.set_device(args.device)
model = model.cuda()
# Metrics
metric = LocalizationConfusion(
use_polygons=args.rotation,
mask_shape=input_shape,
        use_broadcasting=system_available_memory > 62,
)
print("Running evaluation")
val_loss, recall, precision, mean_iou = evaluate(model, test_loader, batch_transforms, metric, amp=args.amp)
print(
f"Validation loss: {val_loss:.6} (Recall: {recall:.2%} | Precision: {precision:.2%} | "
f"Mean IoU: {mean_iou:.2%})"
)
def parse_args():
import argparse
parser = argparse.ArgumentParser(
description="docTR evaluation script for text detection (PyTorch)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("arch", type=str, help="text-detection model to evaluate")
parser.add_argument("--dataset", type=str, default="FUNSD", help="Dataset to evaluate on")
parser.add_argument("-b", "--batch_size", type=int, default=2, help="batch size for evaluation")
parser.add_argument("--device", default=None, type=int, help="device")
parser.add_argument("--size", type=int, default=None, help="model input size, H = W")
parser.add_argument("-j", "--workers", type=int, default=None, help="number of workers used for dataloading")
parser.add_argument("--rotation", dest="rotation", action="store_true", help="inference with rotated bbox")
parser.add_argument("--resume", type=str, default=None, help="Checkpoint to resume")
parser.add_argument("--amp", dest="amp", help="Use Automatic Mixed Precision", action="store_true")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
from doctr.file_utils import CLASS_NAME
os.environ["USE_TF"] = "1"
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
import multiprocessing as mp
import time
from pathlib import Path
import psutil
import tensorflow as tf
from tensorflow.keras import mixed_precision
from tqdm import tqdm
gpu_devices = tf.config.experimental.list_physical_devices("GPU")
if any(gpu_devices):
tf.config.experimental.set_memory_growth(gpu_devices[0], True)
from doctr import datasets
from doctr import transforms as T
from doctr.datasets import DataLoader
from doctr.models import detection
from doctr.utils.metrics import LocalizationConfusion
def evaluate(model, val_loader, batch_transforms, val_metric):
# Reset val metric
val_metric.reset()
# Validation loop
val_loss, batch_cnt = 0, 0
for images, targets in tqdm(val_loader):
images = batch_transforms(images)
targets = [{CLASS_NAME: t["boxes"]} for t in targets]
out = model(images, targets, training=False, return_preds=True)
# Compute metric
loc_preds = out["preds"]
for target, loc_pred in zip(targets, loc_preds):
for boxes_gt, boxes_pred in zip(target.values(), loc_pred.values()):
# Remove scores
val_metric.update(gts=boxes_gt, preds=boxes_pred[:, :-1])
val_loss += out["loss"].numpy()
batch_cnt += 1
val_loss /= batch_cnt
recall, precision, mean_iou = val_metric.summary()
return val_loss, recall, precision, mean_iou
def main(args):
print(args)
if not isinstance(args.workers, int):
args.workers = min(16, mp.cpu_count())
system_available_memory = int(psutil.virtual_memory().available / 1024**3)
# AMP
if args.amp:
mixed_precision.set_global_policy("mixed_float16")
input_shape = (args.size, args.size, 3) if isinstance(args.size, int) else None
# Load docTR model
model = detection.__dict__[args.arch](
        pretrained=not isinstance(args.resume, str),
assume_straight_pages=not args.rotation,
input_shape=input_shape,
)
# Resume weights
if isinstance(args.resume, str):
print(f"Resuming {args.resume}")
model.load_weights(args.resume).expect_partial()
input_shape = model.cfg["input_shape"] if input_shape is None else input_shape
mean, std = model.cfg["mean"], model.cfg["std"]
st = time.time()
ds = datasets.__dict__[args.dataset](
train=True,
download=True,
use_polygons=args.rotation,
sample_transforms=T.Resize(input_shape[:2]),
)
# Monkeypatch
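    # Same trick as in the PyTorch evaluation script: re-root the train split and
    # append the test split so evaluation covers the full corpus.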
subfolder = ds.root.split("/")[-2:]
ds.root = str(Path(ds.root).parent.parent)
ds.data = [(os.path.join(*subfolder, name), target) for name, target in ds.data]
_ds = datasets.__dict__[args.dataset](
train=False,
download=True,
use_polygons=args.rotation,
sample_transforms=T.Resize(input_shape[:2]),
)
subfolder = _ds.root.split("/")[-2:]
ds.data.extend([(os.path.join(*subfolder, name), target) for name, target in _ds.data])
test_loader = DataLoader(
ds,
batch_size=args.batch_size,
drop_last=False,
num_workers=args.workers,
shuffle=False,
)
print(f"Test set loaded in {time.time() - st:.4}s ({len(ds)} samples in " f"{len(test_loader)} batches)")
batch_transforms = T.Normalize(mean=mean, std=std)
# Metrics
metric = LocalizationConfusion(
use_polygons=args.rotation,
mask_shape=input_shape[:2],
        use_broadcasting=system_available_memory > 62,
)
print("Running evaluation")
val_loss, recall, precision, mean_iou = evaluate(model, test_loader, batch_transforms, metric)
print(
f"Validation loss: {val_loss:.6} (Recall: {recall:.2%} | Precision: {precision:.2%} | "
f"Mean IoU: {mean_iou:.2%})"
)
def parse_args():
import argparse
parser = argparse.ArgumentParser(
description="docTR evaluation script for text detection (TensorFlow)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("arch", type=str, help="text-detection model to evaluate")
parser.add_argument("--dataset", type=str, default="FUNSD", help="Dataset to evaluate on")
parser.add_argument("-b", "--batch_size", type=int, default=2, help="batch size for evaluation")
parser.add_argument("--size", type=int, default=None, help="model input size, H = W")
parser.add_argument("-j", "--workers", type=int, default=None, help="number of workers used for dataloading")
parser.add_argument("--rotation", dest="rotation", action="store_true", help="inference with rotated bbox")
parser.add_argument("--resume", type=str, default=None, help="Checkpoint to resume")
parser.add_argument("--amp", dest="amp", help="Use Automatic Mixed Precision", action="store_true")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import pickle
from typing import Dict, List
import cv2
import matplotlib.pyplot as plt
import numpy as np
def plot_samples(images, targets: List[Dict[str, np.ndarray]]) -> None:
# Unnormalize image
nb_samples = min(len(images), 4)
_, axes = plt.subplots(2, nb_samples, figsize=(20, 5))
for idx in range(nb_samples):
img = (255 * images[idx].numpy()).round().clip(0, 255).astype(np.uint8)
if img.shape[0] == 3 and img.shape[2] != 3:
img = img.transpose(1, 2, 0)
target = np.zeros(img.shape[:2], np.uint8)
tgts = targets[idx].copy()
for key, boxes in tgts.items():
boxes[:, [0, 2]] = boxes[:, [0, 2]] * img.shape[1]
boxes[:, [1, 3]] = boxes[:, [1, 3]] * img.shape[0]
boxes[:, :4] = boxes[:, :4].round().astype(int)
for box in boxes:
if boxes.ndim == 3:
cv2.fillPoly(target, [np.int0(box)], 1)
else:
target[int(box[1]) : int(box[3]) + 1, int(box[0]) : int(box[2]) + 1] = 1
if nb_samples > 1:
axes[0][idx].imshow(img)
axes[1][idx].imshow(target.astype(bool))
else:
axes[0].imshow(img)
axes[1].imshow(target.astype(bool))
# Disable axis
for ax in axes.ravel():
ax.axis("off")
plt.show()
def plot_recorder(lr_recorder, loss_recorder, beta: float = 0.95, **kwargs) -> None:
"""Display the results of the LR grid search.
Adapted from https://github.com/frgfm/Holocron/blob/master/holocron/trainer/core.py
Args:
lr_recorder: list of LR values
loss_recorder: list of loss values
beta (float, optional): smoothing factor
"""
if len(lr_recorder) != len(loss_recorder) or len(lr_recorder) == 0:
raise AssertionError("Both `lr_recorder` and `loss_recorder` should have the same length")
# Exp moving average of loss
smoothed_losses = []
avg_loss = 0.0
for idx, loss in enumerate(loss_recorder):
avg_loss = beta * avg_loss + (1 - beta) * loss
smoothed_losses.append(avg_loss / (1 - beta ** (idx + 1)))
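    # The division by (1 - beta**(idx + 1)) above is the standard zero-bias
    # correction for an exponential moving average initialized at 0 (the same
    # correction used in Adam).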
# Properly rescale Y-axis
data_slice = slice(
min(len(loss_recorder) // 10, 10),
# -min(len(loss_recorder) // 20, 5) if len(loss_recorder) >= 20 else len(loss_recorder)
len(loss_recorder),
)
vals = np.array(smoothed_losses[data_slice])
min_idx = vals.argmin()
max_val = vals.max() if min_idx is None else vals[: min_idx + 1].max() # type: ignore[misc]
delta = max_val - vals[min_idx]
plt.plot(lr_recorder[data_slice], smoothed_losses[data_slice])
plt.xscale("log")
plt.xlabel("Learning Rate")
plt.ylabel("Training loss")
plt.ylim(vals[min_idx] - 0.1 * delta, max_val + 0.2 * delta)
plt.grid(True, linestyle="--", axis="x")
plt.show(**kwargs)
def load_backbone(model, weights_path):
pretrained_backbone_weights = pickle.load(open(weights_path, "rb"))
model.feat_extractor.set_weights(pretrained_backbone_weights[0])
model.fpn.set_weights(pretrained_backbone_weights[1])
return model
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
os.environ["USE_TORCH"] = "1"
import datetime
import hashlib
import logging
import multiprocessing as mp
import time
import numpy as np
import psutil
import torch
import wandb
from fastprogress.fastprogress import master_bar, progress_bar
from torch.optim.lr_scheduler import CosineAnnealingLR, MultiplicativeLR, OneCycleLR
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torchvision.transforms import ColorJitter, Compose, Normalize
from doctr import transforms as T
from doctr.datasets import DetectionDataset
from doctr.models import detection, login_to_hub, push_to_hf_hub
from doctr.utils.metrics import LocalizationConfusion
from utils import plot_recorder, plot_samples
def record_lr(
model: torch.nn.Module,
train_loader: DataLoader,
batch_transforms,
optimizer,
start_lr: float = 1e-7,
end_lr: float = 1,
num_it: int = 100,
amp: bool = False,
):
"""Gridsearch the optimal learning rate for the training.
Adapted from https://github.com/frgfm/Holocron/blob/master/holocron/trainer/core.py
"""
if num_it > len(train_loader):
raise ValueError("the value of `num_it` needs to be lower than the number of available batches")
model = model.train()
# Update param groups & LR
optimizer.defaults["lr"] = start_lr
for pgroup in optimizer.param_groups:
pgroup["lr"] = start_lr
gamma = (end_lr / start_lr) ** (1 / (num_it - 1))
scheduler = MultiplicativeLR(optimizer, lambda step: gamma)
lr_recorder = [start_lr * gamma**idx for idx in range(num_it)]
loss_recorder = []
if amp:
scaler = torch.cuda.amp.GradScaler()
for batch_idx, (images, targets) in enumerate(train_loader):
if torch.cuda.is_available():
images = images.cuda()
images = batch_transforms(images)
# Forward, Backward & update
optimizer.zero_grad()
if amp:
with torch.cuda.amp.autocast():
train_loss = model(images, targets)["loss"]
scaler.scale(train_loss).backward()
# Gradient clipping
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
# Update the params
scaler.step(optimizer)
scaler.update()
else:
train_loss = model(images, targets)["loss"]
train_loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
optimizer.step()
# Update LR
scheduler.step()
# Record
if not torch.isfinite(train_loss):
if batch_idx == 0:
raise ValueError("loss value is NaN or inf.")
else:
break
loss_recorder.append(train_loss.item())
# Stop after the number of iterations
if batch_idx + 1 == num_it:
break
return lr_recorder[: len(loss_recorder)], loss_recorder
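# Illustrative sketch (not used by the script, helper name is hypothetical): the
# LRs probed by record_lr form a geometric progression, so consecutive values
# share the constant ratio gamma computed above.
def _example_lr_grid(start_lr: float = 1e-7, end_lr: float = 1.0, num_it: int = 100):
    gamma = (end_lr / start_lr) ** (1 / (num_it - 1))
    # lr_i = start_lr * gamma**i, so lr_0 == start_lr and lr_{num_it - 1} == end_lr
    return [start_lr * gamma**idx for idx in range(num_it)]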
def fit_one_epoch(model, train_loader, batch_transforms, optimizer, scheduler, mb, amp=False):
if amp:
scaler = torch.cuda.amp.GradScaler()
model.train()
# Iterate over the batches of the dataset
for images, targets in progress_bar(train_loader, parent=mb):
if torch.cuda.is_available():
images = images.cuda()
images = batch_transforms(images)
optimizer.zero_grad()
if amp:
with torch.cuda.amp.autocast():
train_loss = model(images, targets)["loss"]
scaler.scale(train_loss).backward()
# Gradient clipping
scaler.unscale_(optimizer)
torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
# Update the params
scaler.step(optimizer)
scaler.update()
else:
train_loss = model(images, targets)["loss"]
train_loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), 5)
optimizer.step()
scheduler.step()
mb.child.comment = f"Training loss: {train_loss.item():.6}"
@torch.no_grad()
def evaluate(model, val_loader, batch_transforms, val_metric, amp=False):
# Model in eval mode
model.eval()
# Reset val metric
val_metric.reset()
# Validation loop
val_loss, batch_cnt = 0, 0
for images, targets in val_loader:
if torch.cuda.is_available():
images = images.cuda()
images = batch_transforms(images)
if amp:
with torch.cuda.amp.autocast():
out = model(images, targets, return_preds=True)
else:
out = model(images, targets, return_preds=True)
# Compute metric
loc_preds = out["preds"]
for target, loc_pred in zip(targets, loc_preds):
for boxes_gt, boxes_pred in zip(target.values(), loc_pred.values()):
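                # NB: `args` here is the module-level namespace built by
                # parse_args, not a function parameter.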
if args.rotation and args.eval_straight:
# Convert pred to boxes [xmin, ymin, xmax, ymax] N, 4, 2 --> N, 4
boxes_pred = np.concatenate((boxes_pred.min(axis=1), boxes_pred.max(axis=1)), axis=-1)
val_metric.update(gts=boxes_gt, preds=boxes_pred[:, :4])
val_loss += out["loss"].item()
batch_cnt += 1
val_loss /= batch_cnt
recall, precision, mean_iou = val_metric.summary()
return val_loss, recall, precision, mean_iou
def main(args):
print(args)
if args.push_to_hub:
login_to_hub()
if not isinstance(args.workers, int):
args.workers = min(16, mp.cpu_count())
torch.backends.cudnn.benchmark = True
system_available_memory = int(psutil.virtual_memory().available / 1024**3)
st = time.time()
val_set = DetectionDataset(
img_folder=os.path.join(args.val_path, "images"),
label_path=os.path.join(args.val_path, "labels.json"),
sample_transforms=T.SampleCompose(
(
[T.Resize((args.input_size, args.input_size), preserve_aspect_ratio=True, symmetric_pad=True)]
if not args.rotation or args.eval_straight
else []
)
+ (
[
T.Resize(args.input_size, preserve_aspect_ratio=True), # This does not pad
T.RandomRotate(90, expand=True),
T.Resize((args.input_size, args.input_size), preserve_aspect_ratio=True, symmetric_pad=True),
]
if args.rotation and not args.eval_straight
else []
)
),
use_polygons=args.rotation and not args.eval_straight,
)
val_loader = DataLoader(
val_set,
batch_size=args.batch_size,
drop_last=False,
num_workers=args.workers,
sampler=SequentialSampler(val_set),
pin_memory=torch.cuda.is_available(),
collate_fn=val_set.collate_fn,
)
print(f"Validation set loaded in {time.time() - st:.4}s ({len(val_set)} samples in " f"{len(val_loader)} batches)")
with open(os.path.join(args.val_path, "labels.json"), "rb") as f:
val_hash = hashlib.sha256(f.read()).hexdigest()
batch_transforms = Normalize(mean=(0.798, 0.785, 0.772), std=(0.264, 0.2749, 0.287))
# Load doctr model
model = detection.__dict__[args.arch](
pretrained=args.pretrained,
assume_straight_pages=not args.rotation,
class_names=val_set.class_names,
)
# Resume weights
if isinstance(args.resume, str):
print(f"Resuming {args.resume}")
checkpoint = torch.load(args.resume, map_location="cpu")
model.load_state_dict(checkpoint)
# GPU
if isinstance(args.device, int):
if not torch.cuda.is_available():
raise AssertionError("PyTorch cannot access your GPU. Please investigate!")
if args.device >= torch.cuda.device_count():
raise ValueError("Invalid device index")
# Silent default switch to GPU if available
elif torch.cuda.is_available():
args.device = 0
else:
logging.warning("No accessible GPU, targe device set to CPU.")
if torch.cuda.is_available():
torch.cuda.set_device(args.device)
model = model.cuda()
# Metrics
val_metric = LocalizationConfusion(
use_polygons=args.rotation and not args.eval_straight,
mask_shape=(args.input_size, args.input_size),
        use_broadcasting=system_available_memory > 62,
)
if args.test_only:
print("Running evaluation")
val_loss, recall, precision, mean_iou = evaluate(model, val_loader, batch_transforms, val_metric, amp=args.amp)
print(
f"Validation loss: {val_loss:.6} (Recall: {recall:.2%} | Precision: {precision:.2%} | "
f"Mean IoU: {mean_iou:.2%})"
)
return
st = time.time()
# Load both train and val data generators
train_set = DetectionDataset(
img_folder=os.path.join(args.train_path, "images"),
label_path=os.path.join(args.train_path, "labels.json"),
img_transforms=Compose(
[
# Augmentations
T.RandomApply(T.ColorInversion(), 0.1),
ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.02),
]
),
sample_transforms=T.SampleCompose(
(
[T.Resize((args.input_size, args.input_size), preserve_aspect_ratio=True, symmetric_pad=True)]
if not args.rotation
else []
)
+ (
[
T.Resize(args.input_size, preserve_aspect_ratio=True),
T.RandomRotate(90, expand=True),
T.Resize((args.input_size, args.input_size), preserve_aspect_ratio=True, symmetric_pad=True),
]
if args.rotation
else []
)
),
use_polygons=args.rotation,
)
train_loader = DataLoader(
train_set,
batch_size=args.batch_size,
drop_last=True,
num_workers=args.workers,
sampler=RandomSampler(train_set),
pin_memory=torch.cuda.is_available(),
collate_fn=train_set.collate_fn,
)
print(f"Train set loaded in {time.time() - st:.4}s ({len(train_set)} samples in " f"{len(train_loader)} batches)")
with open(os.path.join(args.train_path, "labels.json"), "rb") as f:
train_hash = hashlib.sha256(f.read()).hexdigest()
if args.show_samples:
x, target = next(iter(train_loader))
plot_samples(x, target)
return
# Backbone freezing
if args.freeze_backbone:
for p in model.feat_extractor.parameters():
            p.requires_grad_(False)
# Optimizer
optimizer = torch.optim.Adam(
[p for p in model.parameters() if p.requires_grad],
args.lr,
betas=(0.95, 0.99),
eps=1e-6,
weight_decay=args.weight_decay,
)
# LR Finder
if args.find_lr:
lrs, losses = record_lr(model, train_loader, batch_transforms, optimizer, amp=args.amp)
plot_recorder(lrs, losses)
return
# Scheduler
if args.sched == "cosine":
scheduler = CosineAnnealingLR(optimizer, args.epochs * len(train_loader), eta_min=args.lr / 25e4)
elif args.sched == "onecycle":
scheduler = OneCycleLR(optimizer, args.lr, args.epochs * len(train_loader))
# Training monitoring
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
exp_name = f"{args.arch}_{current_time}" if args.name is None else args.name
# W&B
if args.wb:
run = wandb.init(
name=exp_name,
project="text-detection",
config={
"learning_rate": args.lr,
"epochs": args.epochs,
"weight_decay": args.weight_decay,
"batch_size": args.batch_size,
"architecture": args.arch,
"input_size": args.input_size,
"optimizer": "adam",
"framework": "pytorch",
"scheduler": args.sched,
"train_hash": train_hash,
"val_hash": val_hash,
"pretrained": args.pretrained,
"rotation": args.rotation,
"amp": args.amp,
},
)
# Create loss queue
min_loss = np.inf
# Training loop
mb = master_bar(range(args.epochs))
for epoch in mb:
fit_one_epoch(model, train_loader, batch_transforms, optimizer, scheduler, mb, amp=args.amp)
# Validation loop at the end of each epoch
val_loss, recall, precision, mean_iou = evaluate(model, val_loader, batch_transforms, val_metric, amp=args.amp)
if val_loss < min_loss:
print(f"Validation loss decreased {min_loss:.6} --> {val_loss:.6}: saving state...")
torch.save(model.state_dict(), f"./{exp_name}.pt")
min_loss = val_loss
log_msg = f"Epoch {epoch + 1}/{args.epochs} - Validation loss: {val_loss:.6} "
if any(val is None for val in (recall, precision, mean_iou)):
log_msg += "(Undefined metric value, caused by empty GTs or predictions)"
else:
log_msg += f"(Recall: {recall:.2%} | Precision: {precision:.2%} | Mean IoU: {mean_iou:.2%})"
mb.write(log_msg)
# W&B
if args.wb:
wandb.log(
{
"val_loss": val_loss,
"recall": recall,
"precision": precision,
"mean_iou": mean_iou,
}
)
if args.wb:
run.finish()
if args.push_to_hub:
push_to_hf_hub(model, exp_name, task="detection", run_config=args)
def parse_args():
import argparse
parser = argparse.ArgumentParser(
description="DocTR training script for text detection (PyTorch)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("train_path", type=str, help="path to training data folder")
parser.add_argument("val_path", type=str, help="path to validation data folder")
parser.add_argument("arch", type=str, help="text-detection model to train")
parser.add_argument("--name", type=str, default=None, help="Name of your training experiment")
parser.add_argument("--epochs", type=int, default=10, help="number of epochs to train the model on")
parser.add_argument("-b", "--batch_size", type=int, default=2, help="batch size for training")
parser.add_argument("--device", default=None, type=int, help="device")
parser.add_argument("--input_size", type=int, default=1024, help="model input size, H = W")
parser.add_argument("--lr", type=float, default=0.001, help="learning rate for the optimizer (Adam)")
parser.add_argument("--wd", "--weight-decay", default=0, type=float, help="weight decay", dest="weight_decay")
parser.add_argument("-j", "--workers", type=int, default=None, help="number of workers used for dataloading")
parser.add_argument("--resume", type=str, default=None, help="Path to your checkpoint")
parser.add_argument("--test-only", dest="test_only", action="store_true", help="Run the validation loop")
parser.add_argument(
"--freeze-backbone", dest="freeze_backbone", action="store_true", help="freeze model backbone for fine-tuning"
)
parser.add_argument(
"--show-samples", dest="show_samples", action="store_true", help="Display unormalized training samples"
)
parser.add_argument("--wb", dest="wb", action="store_true", help="Log to Weights & Biases")
parser.add_argument("--push-to-hub", dest="push_to_hub", action="store_true", help="Push to Huggingface Hub")
parser.add_argument(
"--pretrained",
dest="pretrained",
action="store_true",
help="Load pretrained parameters before starting the training",
)
parser.add_argument("--rotation", dest="rotation", action="store_true", help="train with rotated documents")
parser.add_argument(
"--eval-straight",
action="store_true",
help="metrics evaluation with straight boxes instead of polygons to save time + memory",
)
parser.add_argument("--sched", type=str, default="cosine", help="scheduler to use")
parser.add_argument("--amp", dest="amp", help="Use Automatic Mixed Precision", action="store_true")
parser.add_argument("--find-lr", action="store_true", help="Gridsearch the optimal LR")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
"""
Object detection latency benchmark
"""
import argparse
import os
import time
import numpy as np
import torch
os.environ["USE_TORCH"] = "1"
from doctr.models import obj_detection
@torch.no_grad()
def main(args):
device = torch.device("cuda:0" if args.gpu else "cpu")
# Pretrained imagenet model
model = (
obj_detection.__dict__[args.arch](
pretrained=args.pretrained,
min_size=args.size,
max_size=args.size,
)
.eval()
.to(device=device)
)
# Input
img_tensor = torch.rand((1, 3, args.size, args.size)).to(device=device)
# Warmup
for _ in range(10):
_ = model(img_tensor)
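    # NB: CUDA kernels launch asynchronously; for stricter GPU timings one would
    # call torch.cuda.synchronize() before reading the clock (kept simple here).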
timings = []
# Evaluation runs
for _ in range(args.it):
start_ts = time.perf_counter()
_ = model(img_tensor)
timings.append(time.perf_counter() - start_ts)
_timings = np.array(timings)
print(f"{args.arch} ({args.it} runs on ({args.size}, {args.size}) inputs)")
print(f"mean {1000 * _timings.mean():.2f}ms, std {1000 * _timings.std():.2f}ms")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description="docTR latency benchmark for object detection (PyTorch)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("arch", type=str, help="Architecture to use")
parser.add_argument("--size", type=int, default=1024, help="The image input size")
parser.add_argument("--gpu", dest="gpu", help="Should the benchmark be performed on GPU", action="store_true")
parser.add_argument("--it", type=int, default=100, help="Number of iterations to run")
parser.add_argument(
"--pretrained", dest="pretrained", help="Use pre-trained models from the modelzoo", action="store_true"
)
args = parser.parse_args()
main(args)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
from typing import Dict, List
import cv2
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.cm import get_cmap
def plot_samples(images, targets: List[Dict[str, np.ndarray]], classes: List[str]) -> None:
cmap = get_cmap("gist_rainbow", len(classes))
# Unnormalize image
nb_samples = min(len(images), 4)
_, axes = plt.subplots(1, nb_samples, figsize=(20, 5))
for idx in range(nb_samples):
img = (255 * images[idx].numpy()).round().clip(0, 255).astype(np.uint8)
if img.shape[0] == 3 and img.shape[2] != 3:
img = img.transpose(1, 2, 0)
target = img.copy()
for box, class_idx in zip(targets[idx]["boxes"].numpy(), targets[idx]["labels"]):
r, g, b, _ = cmap(class_idx.numpy())
color = int(round(255 * r)), int(round(255 * g)), int(round(255 * b))
cv2.rectangle(target, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), color, 2)
text_size, _ = cv2.getTextSize(classes[class_idx], cv2.FONT_HERSHEY_SIMPLEX, 1, 2)
text_w, text_h = text_size
cv2.rectangle(target, (int(box[0]), int(box[1])), (int(box[0]) + text_w, int(box[1]) - text_h), color, -1)
cv2.putText(
target, classes[class_idx], (int(box[0]), int(box[1])), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2
)
axes[idx].imshow(target)
# Disable axis
for ax in axes.ravel():
ax.axis("off")
plt.show()
def plot_recorder(lr_recorder, loss_recorder, beta: float = 0.95, **kwargs) -> None:
"""Display the results of the LR grid search.
Adapted from https://github.com/frgfm/Holocron/blob/master/holocron/trainer/core.py
Args:
lr_recorder: list of LR values
loss_recorder: list of loss values
beta (float, optional): smoothing factor
"""
if len(lr_recorder) != len(loss_recorder) or len(lr_recorder) == 0:
raise AssertionError("Both `lr_recorder` and `loss_recorder` should have the same length")
# Exp moving average of loss
smoothed_losses = []
avg_loss = 0.0
for idx, loss in enumerate(loss_recorder):
avg_loss = beta * avg_loss + (1 - beta) * loss
smoothed_losses.append(avg_loss / (1 - beta ** (idx + 1)))
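    # Dividing by (1 - beta**(idx + 1)) removes the zero-initialization bias of
    # the moving average (the same correction used in Adam).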
# Properly rescale Y-axis
data_slice = slice(
min(len(loss_recorder) // 10, 10),
-min(len(loss_recorder) // 20, 5) if len(loss_recorder) >= 20 else len(loss_recorder),
)
vals = np.array(smoothed_losses[data_slice])
min_idx = vals.argmin()
max_val = vals.max() if min_idx is None else vals[: min_idx + 1].max() # type: ignore[misc]
delta = max_val - vals[min_idx]
plt.plot(lr_recorder[data_slice], smoothed_losses[data_slice])
plt.xscale("log")
plt.xlabel("Learning Rate")
plt.ylabel("Training loss")
plt.ylim(vals[min_idx] - 0.1 * delta, max_val + 0.2 * delta)
plt.grid(True, linestyle="--", axis="x")
plt.show(**kwargs)
|
# Copyright (C) 2021-2023, Mindee.
# This program is licensed under the Apache License 2.0.
# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.
import os
os.environ["USE_TORCH"] = "1"
import datetime
import logging
import multiprocessing as mp
import time
import numpy as np
import torch
import torch.optim as optim
import wandb
from fastprogress.fastprogress import master_bar, progress_bar
from torch.optim.lr_scheduler import MultiplicativeLR, StepLR
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from torchvision.transforms import ColorJitter, Compose, GaussianBlur
from doctr import transforms as T
from doctr.datasets import DocArtefacts
from doctr.models import login_to_hub, obj_detection, push_to_hf_hub
from doctr.utils import DetectionMetric
from utils import plot_recorder, plot_samples
def record_lr(
model: torch.nn.Module,
train_loader: DataLoader,
optimizer,
start_lr: float = 1e-7,
end_lr: float = 1,
num_it: int = 100,
amp: bool = False,
):
"""Gridsearch the optimal learning rate for the training.
Adapted from https://github.com/frgfm/Holocron/blob/master/holocron/trainer/core.py
"""
if num_it > len(train_loader):
raise ValueError("the value of `num_it` needs to be lower than the number of available batches")
model = model.train()
# Update param groups & LR
optimizer.defaults["lr"] = start_lr
for pgroup in optimizer.param_groups:
pgroup["lr"] = start_lr
gamma = (end_lr / start_lr) ** (1 / (num_it - 1))
scheduler = MultiplicativeLR(optimizer, lambda step: gamma)
lr_recorder = [start_lr * gamma**idx for idx in range(num_it)]
loss_recorder = []
if amp:
scaler = torch.cuda.amp.GradScaler()
for batch_idx, (images, targets) in enumerate(train_loader):
targets = convert_to_abs_coords(targets, images.shape)
if torch.cuda.is_available():
images = images.cuda()
targets = [{k: v.cuda() for k, v in t.items()} for t in targets]
# Forward, Backward & update
optimizer.zero_grad()
if amp:
with torch.cuda.amp.autocast():
loss_dict = model(images, targets)
train_loss = sum(v for v in loss_dict.values())
scaler.scale(train_loss).backward()
# Update the params
scaler.step(optimizer)
scaler.update()
else:
loss_dict = model(images, targets)
train_loss = sum(v for v in loss_dict.values())
train_loss.backward()
optimizer.step()
# Update LR
scheduler.step()
# Record
if not torch.isfinite(train_loss):
if batch_idx == 0:
raise ValueError("loss value is NaN or inf.")
else:
break
loss_recorder.append(train_loss.item())
# Stop after the number of iterations
if batch_idx + 1 == num_it:
break
return lr_recorder[: len(loss_recorder)], loss_recorder
def convert_to_abs_coords(targets, img_shape):
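    # Targets store boxes as relative [xmin, ymin, xmax, ymax] in [0, 1], while
    # torchvision detection models expect absolute pixel coordinates. For example,
    # with img_shape[-2:] == (200, 100) (H, W), a box (0.25, 0.5, 0.75, 1.0)
    # becomes (25.0, 100.0, 75.0, 200.0): x scaled by width, y by height.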
height, width = img_shape[-2:]
for idx, t in enumerate(targets):
targets[idx]["boxes"][:, 0::2] = (t["boxes"][:, 0::2] * width).round()
targets[idx]["boxes"][:, 1::2] = (t["boxes"][:, 1::2] * height).round()
targets = [
{
"boxes": torch.from_numpy(t["boxes"]).to(dtype=torch.float32),
"labels": torch.tensor(t["labels"]).to(dtype=torch.long),
}
for t in targets
]
return targets
def fit_one_epoch(model, train_loader, optimizer, scheduler, mb, amp=False):
if amp:
scaler = torch.cuda.amp.GradScaler()
model.train()
# Iterate over the batches of the dataset
for images, targets in progress_bar(train_loader, parent=mb):
targets = convert_to_abs_coords(targets, images.shape)
if torch.cuda.is_available():
images = images.cuda()
targets = [{k: v.cuda() for k, v in t.items()} for t in targets]
optimizer.zero_grad()
if amp:
with torch.cuda.amp.autocast():
loss_dict = model(images, targets)
loss = sum(v for v in loss_dict.values())
scaler.scale(loss).backward()
# Update the params
scaler.step(optimizer)
scaler.update()
else:
loss_dict = model(images, targets)
loss = sum(v for v in loss_dict.values())
loss.backward()
optimizer.step()
mb.child.comment = f"Training loss: {loss.item()}"
scheduler.step()
@torch.no_grad()
def evaluate(model, val_loader, metric, amp=False):
model.eval()
metric.reset()
for images, targets in val_loader:
targets = convert_to_abs_coords(targets, images.shape)
if torch.cuda.is_available():
images = images.cuda()
if amp:
with torch.cuda.amp.autocast():
output = model(images)
else:
output = model(images)
# Compute metric
pred_labels = np.concatenate([o["labels"].cpu().numpy() for o in output])
pred_boxes = np.concatenate([o["boxes"].cpu().numpy() for o in output])
gt_boxes = np.concatenate([o["boxes"].cpu().numpy() for o in targets])
gt_labels = np.concatenate([o["labels"].cpu().numpy() for o in targets])
metric.update(gt_boxes, pred_boxes, gt_labels, pred_labels)
return metric.summary()
def main(args):
print(args)
if args.push_to_hub:
login_to_hub()
if not isinstance(args.workers, int):
args.workers = min(16, mp.cpu_count())
torch.backends.cudnn.benchmark = True
st = time.time()
val_set = DocArtefacts(
train=False,
download=True,
img_transforms=T.Resize((args.input_size, args.input_size)),
)
val_loader = DataLoader(
val_set,
batch_size=args.batch_size,
drop_last=False,
num_workers=args.workers,
sampler=SequentialSampler(val_set),
pin_memory=torch.cuda.is_available(),
collate_fn=val_set.collate_fn,
)
print(f"Validation set loaded in {time.time() - st:.4}s ({len(val_set)} samples in " f"{len(val_loader)} batches)")
# Load doctr model
model = obj_detection.__dict__[args.arch](pretrained=args.pretrained, num_classes=5)
# Resume weights
if isinstance(args.resume, str):
print(f"Resuming {args.resume}")
checkpoint = torch.load(args.resume, map_location="cpu")
model.load_state_dict(checkpoint)
# GPU
if isinstance(args.device, int):
if not torch.cuda.is_available():
raise AssertionError("PyTorch cannot access your GPU. Please investigate!")
if args.device >= torch.cuda.device_count():
raise ValueError("Invalid device index")
# Silent default switch to GPU if available
elif torch.cuda.is_available():
args.device = 0
else:
logging.warning("No accessible GPU, target device set to CPU.")
if torch.cuda.is_available():
torch.cuda.set_device(args.device)
model = model.cuda()
# Metrics
metric = DetectionMetric(iou_thresh=0.5)
if args.test_only:
print("Running evaluation")
recall, precision, mean_iou = evaluate(model, val_loader, metric, amp=args.amp)
print(f"Recall: {recall:.2%} | Precision: {precision:.2%} |IoU: {mean_iou:.2%}")
return
st = time.time()
# Load train data generators
train_set = DocArtefacts(
train=True,
download=True,
img_transforms=Compose(
[
T.Resize((args.input_size, args.input_size)),
T.RandomApply(T.GaussianNoise(0.0, 0.25), p=0.5),
ColorJitter(brightness=0.3, contrast=0.3, saturation=0.3, hue=0.02),
T.RandomApply(GaussianBlur(kernel_size=(3, 3), sigma=(0.1, 3)), 0.3),
]
),
sample_transforms=T.RandomHorizontalFlip(p=0.5),
)
train_loader = DataLoader(
train_set,
batch_size=args.batch_size,
drop_last=True,
num_workers=args.workers,
sampler=RandomSampler(train_set),
pin_memory=torch.cuda.is_available(),
collate_fn=train_set.collate_fn,
)
print(f"Train set loaded in {time.time() - st:.4}s ({len(train_set)} samples in " f"{len(train_loader)} batches)")
if args.show_samples:
images, targets = next(iter(train_loader))
targets = convert_to_abs_coords(targets, images.shape)
plot_samples(images, targets, train_set.CLASSES)
return
# Backbone freezing
if args.freeze_backbone:
for p in model.backbone.parameters():
            p.requires_grad_(False)
# Optimizer
optimizer = optim.SGD(
[p for p in model.parameters() if p.requires_grad], lr=args.lr, weight_decay=args.weight_decay
)
# LR Finder
if args.find_lr:
lrs, losses = record_lr(model, train_loader, optimizer, amp=args.amp)
plot_recorder(lrs, losses)
return
# Scheduler
scheduler = StepLR(optimizer, step_size=8, gamma=0.7)
# Training monitoring
current_time = datetime.datetime.now().strftime("%Y%m%d-%H%M%S")
exp_name = f"{args.arch}_{current_time}" if args.name is None else args.name
# W&B
if args.wb:
run = wandb.init(
name=exp_name,
project="object-detection",
config={
"learning_rate": args.lr,
"epochs": args.epochs,
"weight_decay": args.weight_decay,
"batch_size": args.batch_size,
"architecture": args.arch,
"input_size": args.input_size,
"optimizer": "sgd",
"framework": "pytorch",
"scheduler": "step",
"pretrained": args.pretrained,
"amp": args.amp,
},
)
mb = master_bar(range(args.epochs))
max_score = 0.0
for epoch in mb:
fit_one_epoch(model, train_loader, optimizer, scheduler, mb, amp=args.amp)
# Validation loop at the end of each epoch
recall, precision, mean_iou = evaluate(model, val_loader, metric, amp=args.amp)
f1_score = 2 * precision * recall / (precision + recall) if (precision + recall) > 0 else 0.0
if f1_score > max_score:
print(f"Validation metric increased {max_score:.6} --> {f1_score:.6}: saving state...")
torch.save(model.state_dict(), f"./{exp_name}.pt")
max_score = f1_score
log_msg = f"Epoch {epoch + 1}/{args.epochs} - "
if any(val is None for val in (recall, precision, mean_iou)):
log_msg += "Undefined metric value, caused by empty GTs or predictions"
else:
log_msg += f"Recall: {recall:.2%} | Precision: {precision:.2%} | Mean IoU: {mean_iou:.2%}"
mb.write(log_msg)
# W&B
if args.wb:
wandb.log(
{
"recall": recall,
"precision": precision,
"mean_iou": mean_iou,
}
)
if args.wb:
run.finish()
if args.push_to_hub:
push_to_hf_hub(model, exp_name, task="obj_detection", run_config=args)
def parse_args():
import argparse
parser = argparse.ArgumentParser(
description="DocTR training script for object detection (PyTorch)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
)
parser.add_argument("arch", type=str, help="text-detection model to train")
parser.add_argument("--name", type=str, default=None, help="Name of your training experiment")
parser.add_argument("--epochs", type=int, default=20, help="number of epochs to train the model on")
parser.add_argument("-b", "--batch_size", type=int, default=2, help="batch size for training")
parser.add_argument("--device", default=None, type=int, help="device")
parser.add_argument("--input_size", type=int, default=1024, help="model input size, H = W")
parser.add_argument("--lr", type=float, default=0.001, help="learning rate for the optimizer (SGD)")
parser.add_argument("--wd", "--weight-decay", default=0, type=float, help="weight decay", dest="weight_decay")
parser.add_argument("-j", "--workers", type=int, default=None, help="number of workers used for dataloading")
parser.add_argument("--resume", type=str, default=None, help="Path to your checkpoint")
parser.add_argument("--test-only", dest="test_only", action="store_true", help="Run the validation loop")
parser.add_argument(
"--show-samples", dest="show_samples", action="store_true", help="Display unormalized training samples"
)
parser.add_argument(
"--freeze-backbone", dest="freeze_backbone", action="store_true", help="freeze model backbone for fine-tuning"
)
parser.add_argument("--wb", dest="wb", action="store_true", help="Log to Weights & Biases")
parser.add_argument("--push-to-hub", dest="push_to_hub", action="store_true", help="Push to Huggingface Hub")
parser.add_argument(
"--pretrained",
dest="pretrained",
action="store_true",
help="Load pretrained parameters before starting the training",
)
parser.add_argument("--amp", dest="amp", help="Use Automatic Mixed Precision", action="store_true")
parser.add_argument("--find-lr", action="store_true", help="Gridsearch the optimal LR")
args = parser.parse_args()
return args
if __name__ == "__main__":
args = parse_args()
main(args)
|
import json
import shutil
import tempfile
from io import BytesIO
import cv2
import hdf5storage
import numpy as np
import pytest
import requests
import scipy.io as sio
from PIL import Image
from doctr.datasets.generator.base import synthesize_text_img
from doctr.io import reader
from doctr.utils import geometry
@pytest.fixture(scope="session")
def mock_vocab():
return (
"3K}7eé;5àÎYho]QwV6qU~W\"XnbBvcADfËmy.9ÔpÛ*{CôïE%M4#ÈR:g@T$x?0î£|za1ù8,OG€P-kçHëÀÂ2É/ûIJ'j"
"(LNÙFut[)èZs+&°Sd=Ï!<â_Ç>rêi`l"
)
@pytest.fixture(scope="session")
def mock_pdf(tmpdir_factory):
# Page 1
text_img = synthesize_text_img("I am a jedi!", background_color=(255, 255, 255), text_color=(0, 0, 0))
page = Image.new(text_img.mode, (1240, 1754), (255, 255, 255))
page.paste(text_img, (50, 100))
# Page 2
text_img = synthesize_text_img("No, I am your father.", background_color=(255, 255, 255), text_color=(0, 0, 0))
_page = Image.new(text_img.mode, (1240, 1754), (255, 255, 255))
_page.paste(text_img, (40, 300))
# Save the PDF
fn = tmpdir_factory.mktemp("data").join("mock_pdf_file.pdf")
page.save(str(fn), "PDF", save_all=True, append_images=[_page])
return str(fn)
@pytest.fixture(scope="session")
def mock_payslip(tmpdir_factory):
url = "https://3.bp.blogspot.com/-Es0oHTCrVEk/UnYA-iW9rYI/AAAAAAAAAFI/hWExrXFbo9U/s1600/003.jpg"
file = BytesIO(requests.get(url).content)
folder = tmpdir_factory.mktemp("data")
fn = str(folder.join("mock_payslip.jpeg"))
with open(fn, "wb") as f:
f.write(file.getbuffer())
return fn
@pytest.fixture(scope="session")
def mock_tilted_payslip(mock_payslip, tmpdir_factory):
image = reader.read_img_as_numpy(mock_payslip)
image = geometry.rotate_image(image, 30, expand=True)
tmp_path = str(tmpdir_factory.mktemp("data").join("mock_tilted_payslip.jpg"))
cv2.imwrite(tmp_path, image)
return tmp_path
@pytest.fixture(scope="session")
def mock_text_box_stream():
url = "https://doctr-static.mindee.com/models?id=v0.5.1/word-crop.png&src=0"
return requests.get(url).content
@pytest.fixture(scope="session")
def mock_text_box(mock_text_box_stream, tmpdir_factory):
file = BytesIO(mock_text_box_stream)
fn = tmpdir_factory.mktemp("data").join("mock_text_box_file.png")
with open(fn, "wb") as f:
f.write(file.getbuffer())
return str(fn)
@pytest.fixture(scope="session")
def mock_image_stream():
url = "https://miro.medium.com/max/3349/1*mk1-6aYaf_Bes1E3Imhc0A.jpeg"
return requests.get(url).content
@pytest.fixture(scope="session")
def mock_image_path(mock_image_stream, tmpdir_factory):
file = BytesIO(mock_image_stream)
folder = tmpdir_factory.mktemp("images")
fn = folder.join("mock_image_file.jpeg")
with open(fn, "wb") as f:
f.write(file.getbuffer())
return str(fn)
@pytest.fixture(scope="session")
def mock_image_folder(mock_image_stream, tmpdir_factory):
file = BytesIO(mock_image_stream)
folder = tmpdir_factory.mktemp("images")
for i in range(5):
fn = folder.join("mock_image_file_" + str(i) + ".jpeg")
with open(fn, "wb") as f:
f.write(file.getbuffer())
return str(folder)
@pytest.fixture(scope="session")
def mock_detection_label(tmpdir_factory):
folder = tmpdir_factory.mktemp("labels")
labels = {}
for idx in range(5):
labels[f"mock_image_file_{idx}.jpeg"] = {
"img_dimensions": (800, 600),
"img_hash": "dummy_hash",
"polygons": [
[[1, 2], [1, 3], [2, 1], [2, 3]],
[[10, 20], [10, 30], [20, 10], [20, 30]],
[[3, 2], [3, 3], [4, 1], [4, 3]],
[[30, 20], [30, 30], [40, 10], [40, 30]],
],
}
labels_path = folder.join("labels.json")
with open(labels_path, "w") as f:
json.dump(labels, f)
return str(labels_path)
@pytest.fixture(scope="session")
def mock_recognition_label(tmpdir_factory):
label_file = tmpdir_factory.mktemp("labels").join("labels.json")
label = {
"mock_image_file_0.jpeg": "I",
"mock_image_file_1.jpeg": "am",
"mock_image_file_2.jpeg": "a",
"mock_image_file_3.jpeg": "jedi",
"mock_image_file_4.jpeg": "!",
}
with open(label_file, "w") as f:
json.dump(label, f)
return str(label_file)
@pytest.fixture(scope="session")
def mock_ocrdataset(tmpdir_factory, mock_image_stream):
root = tmpdir_factory.mktemp("dataset")
label_file = root.join("labels.json")
label = {
"mock_image_file_0.jpg": {
"typed_words": [
{"value": "I", "geometry": (0.2, 0.2, 0.1, 0.1, 0)},
{"value": "am", "geometry": (0.5, 0.5, 0.1, 0.1, 0)},
]
},
"mock_image_file_1.jpg": {
"typed_words": [
{"value": "a", "geometry": (0.2, 0.2, 0.1, 0.1, 0)},
{"value": "jedi", "geometry": (0.5, 0.5, 0.1, 0.1, 0)},
]
},
"mock_image_file_2.jpg": {
"typed_words": [
{"value": "!", "geometry": (0.2, 0.2, 0.1, 0.1, 0)},
]
},
}
with open(label_file, "w") as f:
json.dump(label, f)
file = BytesIO(mock_image_stream)
image_folder = tmpdir_factory.mktemp("images")
for i in range(3):
fn = image_folder.join(f"mock_image_file_{i}.jpg")
with open(fn, "wb") as f:
f.write(file.getbuffer())
return str(image_folder), str(label_file)
@pytest.fixture(scope="session")
def mock_ic13(tmpdir_factory, mock_image_stream):
file = BytesIO(mock_image_stream)
image_folder = tmpdir_factory.mktemp("images")
label_folder = tmpdir_factory.mktemp("labels")
labels = [
"100, 100, 200, 200, 'I'\n",
"250, 300, 455, 678, 'am'\n",
"321, 485, 529, 607, 'a'\n",
"235, 121, 325, 621, 'jedi'\n",
"468, 589, 1120, 2520, '!'",
]
for i in range(5):
fn_l = label_folder.join(f"gt_mock_image_file_{i}.txt")
with open(fn_l, "w") as f:
f.writelines(labels)
fn_i = image_folder.join(f"mock_image_file_{i}.jpg")
with open(fn_i, "wb") as f:
f.write(file.getbuffer())
return str(image_folder), str(label_folder)
@pytest.fixture(scope="session")
def mock_imgur5k(tmpdir_factory, mock_image_stream):
file = BytesIO(mock_image_stream)
image_folder = tmpdir_factory.mktemp("images")
label_folder = tmpdir_factory.mktemp("dataset_info")
labels = {
"index_id": {
"YsaVkzl": {
"image_url": "https://i.imgur.com/YsaVkzl.jpg",
"image_path": "/path/to/IMGUR5K-Handwriting-Dataset/images/YsaVkzl.jpg",
"image_hash": "993a7cbb04a7c854d1d841b065948369",
},
"wz3wHhN": {
"image_url": "https://i.imgur.com/wz3wHhN.jpg",
"image_path": "/path/to/IMGUR5K-Handwriting-Dataset/images/wz3wHhN.jpg",
"image_hash": "9157426a98ee52f3e1e8d41fa3a99175",
},
"BRHSP23": {
"image_url": "https://i.imgur.com/BRHSP23.jpg",
"image_path": "/path/to/IMGUR5K-Handwriting-Dataset/images/BRHSP23.jpg",
"image_hash": "aab01f7ac82ae53845b01674e9e34167",
},
},
"index_to_ann_map": {
"YsaVkzl": ["YsaVkzl_0", "YsaVkzl_1"],
"wz3wHhN": ["wz3wHhN_0", "wz3wHhN_1"],
"BRHSP23": ["BRHSP23_0", "BRHSP23_1"],
},
"ann_id": {
"YsaVkzl_0": {"word": "I", "bounding_box": "[305.33, 850.67, 432.33, 115.33, 5.0]"},
"YsaVkzl_1": {"word": "am", "bounding_box": "[546.67, 455.67, 345.0, 212.33, 18.67]"},
"wz3wHhN_0": {"word": "a", "bounding_box": "[544.67, 345.67, 76.0, 222.33, 34.67]"},
"wz3wHhN_1": {"word": "jedi", "bounding_box": "[545.0, 437.0, 76.67, 201.0, 23.33]"},
"BRHSP23_0": {"word": "!", "bounding_box": "[555.67, 432.67, 220.0, 120.33, 7.67]"},
"BRHSP23_1": {"word": "!", "bounding_box": "[566.0, 437.0, 76.67, 201.0, 25.33]"},
},
}
label_file = label_folder.join("imgur5k_annotations.json")
with open(label_file, "w") as f:
json.dump(labels, f)
for index_id in ["YsaVkzl", "wz3wHhN", "BRHSP23"]:
fn_i = image_folder.join(f"{index_id}.jpg")
with open(fn_i, "wb") as f:
f.write(file.getbuffer())
return str(image_folder), str(label_file)
@pytest.fixture(scope="session")
def mock_svhn_dataset(tmpdir_factory, mock_image_stream):
root = tmpdir_factory.mktemp("datasets")
svhn_root = root.mkdir("svhn")
file = BytesIO(mock_image_stream)
# ascii image names
first = np.array([[49], [46], [112], [110], [103]], dtype=np.int16) # 1.png
second = np.array([[50], [46], [112], [110], [103]], dtype=np.int16) # 2.png
third = np.array([[51], [46], [112], [110], [103]], dtype=np.int16) # 3.png
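    # e.g. bytes([49, 46, 112, 110, 103]).decode() == "1.png"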
# labels: label is also ascii
label = {
"height": [35, 35, 35, 35],
"label": [1, 1, 3, 7],
"left": [116, 128, 137, 151],
"top": [27, 29, 29, 26],
"width": [15, 10, 17, 17],
}
matcontent = {"digitStruct": {"name": [first, second, third], "bbox": [label, label, label]}}
# Mock train data
train_root = svhn_root.mkdir("train")
hdf5storage.write(matcontent, filename=train_root.join("digitStruct.mat"))
for i in range(3):
fn = train_root.join(f"{i+1}.png")
with open(fn, "wb") as f:
f.write(file.getbuffer())
# Packing data into an archive to simulate the real data set and bypass archive extraction
archive_path = root.join("svhn_train.tar")
shutil.make_archive(root.join("svhn_train"), "tar", str(svhn_root))
return str(archive_path)
@pytest.fixture(scope="session")
def mock_sroie_dataset(tmpdir_factory, mock_image_stream):
root = tmpdir_factory.mktemp("datasets")
sroie_root = root.mkdir("sroie2019_train_task1")
annotations_folder = sroie_root.mkdir("annotations")
image_folder = sroie_root.mkdir("images")
labels = [
"72, 25, 326, 25, 326, 64, 72, 64, 'I'\n",
"50, 82, 440, 82, 440, 121, 50, 121, 'am'\n",
"205, 121, 285, 121, 285, 139, 205, 139, 'a'\n",
"18, 250, 440, 320, 250, 64, 85, 121, 'jedi'\n",
"400, 112, 252, 84, 112, 84, 75, 88, '!'",
]
file = BytesIO(mock_image_stream)
for i in range(3):
fn_i = image_folder.join(f"{i}.jpg")
with open(fn_i, "wb") as f:
f.write(file.getbuffer())
fn_l = annotations_folder.join(f"{i}.txt")
with open(fn_l, "w") as f:
f.writelines(labels)
# Packing data into an archive to simulate the real data set and bypass archive extraction
archive_path = root.join("sroie2019_train_task1.zip")
shutil.make_archive(root.join("sroie2019_train_task1"), "zip", str(sroie_root))
return str(archive_path)
@pytest.fixture(scope="session")
def mock_funsd_dataset(tmpdir_factory, mock_image_stream):
root = tmpdir_factory.mktemp("datasets")
funsd_root = root.mkdir("funsd")
sub_dataset_root = funsd_root.mkdir("dataset")
train_root = sub_dataset_root.mkdir("training_data")
image_folder = train_root.mkdir("images")
annotations_folder = train_root.mkdir("annotations")
labels = {
"form": [
{
"box": [84, 109, 136, 119],
"text": "I",
"label": "question",
"words": [{"box": [84, 109, 136, 119], "text": "I"}],
"linking": [[0, 37]],
"id": 0,
},
{
"box": [85, 110, 145, 120],
"text": "am",
"label": "answer",
"words": [{"box": [85, 110, 145, 120], "text": "am"}],
"linking": [[1, 38]],
"id": 1,
},
{
"box": [86, 115, 150, 125],
"text": "Luke",
"label": "answer",
"words": [{"box": [86, 115, 150, 125], "text": "Luke"}],
"linking": [[2, 44]],
"id": 2,
},
]
}
file = BytesIO(mock_image_stream)
for i in range(3):
fn_i = image_folder.join(f"{i}.png")
with open(fn_i, "wb") as f:
f.write(file.getbuffer())
fn_l = annotations_folder.join(f"{i}.json")
with open(fn_l, "w") as f:
json.dump(labels, f)
# Packing data into an archive to simulate the real data set and bypass archive extraction
archive_path = root.join("funsd.zip")
shutil.make_archive(root.join("funsd"), "zip", str(funsd_root))
return str(archive_path)
@pytest.fixture(scope="session")
def mock_cord_dataset(tmpdir_factory, mock_image_stream):
root = tmpdir_factory.mktemp("datasets")
cord_root = root.mkdir("cord_train")
image_folder = cord_root.mkdir("image")
annotations_folder = cord_root.mkdir("json")
labels = {
"dontcare": [],
"valid_line": [
{
"words": [
{
"quad": {
"x2": 270,
"y3": 390,
"x3": 270,
"y4": 390,
"x1": 256,
"y1": 374,
"x4": 256,
"y2": 374,
},
"is_key": 0,
"row_id": 2179893,
"text": "I",
}
],
"category": "menu.cnt",
"group_id": 3,
},
{
"words": [
{
"quad": {
"x2": 270,
"y3": 418,
"x3": 270,
"y4": 418,
"x1": 258,
"y1": 402,
"x4": 258,
"y2": 402,
},
"is_key": 0,
"row_id": 2179894,
"text": "am",
}
],
"category": "menu.cnt",
"group_id": 4,
},
{
"words": [
{
"quad": {
"x2": 272,
"y3": 444,
"x3": 272,
"y4": 444,
"x1": 258,
"y1": 428,
"x4": 258,
"y2": 428,
},
"is_key": 0,
"row_id": 2179895,
"text": "Luke",
}
],
"category": "menu.cnt",
"group_id": 5,
},
],
}
file = BytesIO(mock_image_stream)
for i in range(3):
fn_i = image_folder.join(f"receipt_{i}.png")
with open(fn_i, "wb") as f:
f.write(file.getbuffer())
fn_l = annotations_folder.join(f"receipt_{i}.json")
with open(fn_l, "w") as f:
json.dump(labels, f)
# Packing data into an archive to simulate the real data set and bypass archive extraction
archive_path = root.join("cord_train.zip")
shutil.make_archive(root.join("cord_train"), "zip", str(cord_root))
return str(archive_path)
@pytest.fixture(scope="session")
def mock_synthtext_dataset(tmpdir_factory, mock_image_stream):
root = tmpdir_factory.mktemp("datasets")
synthtext_root = root.mkdir("SynthText")
image_folder = synthtext_root.mkdir("8")
annotation_file = synthtext_root.join("gt.mat")
labels = {
"imnames": [[["8/ballet_106_0.jpg"], ["8/ballet_106_1.jpg"], ["8/ballet_106_2.jpg"]]],
"wordBB": [[np.random.randint(1000, size=(2, 4, 5)) for _ in range(3)]],
"txt": [np.array([["I ", "am\na ", "Jedi ", "!"] for _ in range(3)])],
}
# hacky trick to write file into a LocalPath object with scipy.io.savemat
with tempfile.NamedTemporaryFile(mode="wb", delete=True) as f:
sio.savemat(f.name, labels)
shutil.copy(f.name, str(annotation_file))
file = BytesIO(mock_image_stream)
for i in range(3):
fn_i = image_folder.join(f"ballet_106_{i}.jpg")
with open(fn_i, "wb") as f:
f.write(file.getbuffer())
# Packing data into an archive to simulate the real data set and bypass archive extraction
archive_path = root.join("SynthText.zip")
shutil.make_archive(root.join("SynthText"), "zip", str(synthtext_root))
return str(archive_path)
@pytest.fixture(scope="session")
def mock_doc_artefacts(tmpdir_factory, mock_image_stream):
root = tmpdir_factory.mktemp("datasets")
doc_root = root.mkdir("artefact_detection")
labels = {
"0.jpg": [
{"geometry": [0.94375, 0.4013671875, 0.99375, 0.4365234375], "label": "bar_code"},
{"geometry": [0.03125, 0.6923828125, 0.07875, 0.7294921875], "label": "qr_code"},
{"geometry": [0.1975, 0.1748046875, 0.39875, 0.2216796875], "label": "bar_code"},
],
"1.jpg": [
{"geometry": [0.94375, 0.4013671875, 0.99375, 0.4365234375], "label": "bar_code"},
{"geometry": [0.03125, 0.6923828125, 0.07875, 0.7294921875], "label": "qr_code"},
{"geometry": [0.1975, 0.1748046875, 0.39875, 0.2216796875], "label": "background"},
],
"2.jpg": [
{"geometry": [0.94375, 0.4013671875, 0.99375, 0.4365234375], "label": "logo"},
{"geometry": [0.03125, 0.6923828125, 0.07875, 0.7294921875], "label": "qr_code"},
{"geometry": [0.1975, 0.1748046875, 0.39875, 0.2216796875], "label": "photo"},
],
}
train_root = doc_root.mkdir("train")
label_file = train_root.join("labels.json")
with open(label_file, "w") as f:
json.dump(labels, f)
image_folder = train_root.mkdir("images")
file = BytesIO(mock_image_stream)
for i in range(3):
fn = image_folder.join(f"{i}.jpg")
with open(fn, "wb") as f:
f.write(file.getbuffer())
# Packing data into an archive to simulate the real data set and bypass archive extraction
archive_path = root.join("artefact_detection.zip")
shutil.make_archive(root.join("artefact_detection"), "zip", str(doc_root))
return str(archive_path)
@pytest.fixture(scope="session")
def mock_iiit5k_dataset(tmpdir_factory, mock_image_stream):
root = tmpdir_factory.mktemp("datasets")
iiit5k_root = root.mkdir("IIIT5K")
image_folder = iiit5k_root.mkdir("train")
annotation_file = iiit5k_root.join("trainCharBound.mat")
labels = {
"trainCharBound": {"ImgName": ["train/0.png"], "chars": ["I"], "charBB": np.random.randint(50, size=(1, 4))},
}
# hacky trick to write file into a LocalPath object with scipy.io.savemat
with tempfile.NamedTemporaryFile(mode="wb", delete=True) as f:
sio.savemat(f.name, labels)
shutil.copy(f.name, str(annotation_file))
file = BytesIO(mock_image_stream)
for i in range(1):
fn_i = image_folder.join(f"{i}.png")
with open(fn_i, "wb") as f:
f.write(file.getbuffer())
# Packing data into an archive to simulate the real data set and bypass archive extraction
archive_path = root.join("IIIT5K-Word-V3.tar")
shutil.make_archive(root.join("IIIT5K-Word-V3"), "tar", str(iiit5k_root))
return str(archive_path)
@pytest.fixture(scope="session")
def mock_svt_dataset(tmpdir_factory, mock_image_stream):
root = tmpdir_factory.mktemp("datasets")
svt_root = root.mkdir("svt1")
labels = """<tagset><image><imageName>img/00_00.jpg</imageName>
<address>341 Southwest 10th Avenue Portland OR</address><lex>LIVING,ROOM,THEATERS</lex>
<Resolution x="1280" y="880"/><taggedRectangles><taggedRectangle height="75" width="236" x="375" y="253">
<tag>LIVING</tag></taggedRectangle></taggedRectangles></image><image><imageName>img/00_01.jpg</imageName>
<address>1100 Southwest 6th Avenue Portland OR</address><lex>LULA</lex><Resolution x="1650" y="500"/>
<taggedRectangles><taggedRectangle height="80" width="250" x="450" y="242"><tag>HOUSE</tag></taggedRectangle>
</taggedRectangles></image><image><imageName>img/00_02.jpg</imageName>
<address>341 Southwest 10th Avenue Portland OR</address><lex>LIVING,ROOM,THEATERS</lex><Resolution x="850" y="420"/>
<taggedRectangles><taggedRectangle height="100" width="250" x="350" y="220"><tag>COST</tag></taggedRectangle>
</taggedRectangles></image></tagset>"""
with open(svt_root.join("train.xml"), "w") as f:
f.write(labels)
image_folder = svt_root.mkdir("img")
file = BytesIO(mock_image_stream)
for i in range(3):
fn = image_folder.join(f"00_0{i}.jpg")
with open(fn, "wb") as f:
f.write(file.getbuffer())
# Packing data into an archive to simulate the real data set and bypass archive extraction
archive_path = root.join("svt.zip")
shutil.make_archive(root.join("svt"), "zip", str(svt_root))
return str(archive_path)
@pytest.fixture(scope="session")
def mock_ic03_dataset(tmpdir_factory, mock_image_stream):
root = tmpdir_factory.mktemp("datasets")
ic03_root = root.mkdir("SceneTrialTrain")
labels = """<tagset><image><imageName>images/0.jpg</imageName><Resolution x="1280" y="880"/><taggedRectangles>
<taggedRectangle x="174.0" y="392.0" width="274.0" height="195.0" offset="0.0" rotation="0.0"><tag>LIVING</tag>
</taggedRectangle></taggedRectangles></image><image><imageName>images/1.jpg</imageName>
<Resolution x="1650" y="500"/>
<taggedRectangles><taggedRectangle x="244.0" y="440.0" width="300.0" height="220.0" offset="0.0" rotation="0.0">
<tag>HOUSE</tag></taggedRectangle></taggedRectangles></image><image><imageName>images/2.jpg</imageName>
<Resolution x="850" y="420"/><taggedRectangles>
<taggedRectangle x="180.0" y="400.0" width="280.0" height="250.0" offset="0.0" rotation="0.0"><tag>COST</tag>
</taggedRectangle></taggedRectangles></image></tagset>"""
with open(ic03_root.join("words.xml"), "w") as f:
f.write(labels)
image_folder = ic03_root.mkdir("images")
file = BytesIO(mock_image_stream)
for i in range(3):
fn = image_folder.join(f"{i}.jpg")
with open(fn, "wb") as f:
f.write(file.getbuffer())
# Packing data into an archive to simulate the real data set and bypass archive extraction
archive_path = root.join("ic03_train.zip")
shutil.make_archive(root.join("ic03_train"), "zip", str(ic03_root))
return str(archive_path)
@pytest.fixture(scope="session")
def mock_mjsynth_dataset(tmpdir_factory, mock_image_stream):
root = tmpdir_factory.mktemp("datasets")
mjsynth_root = root.mkdir("mjsynth")
image_folder = mjsynth_root.mkdir("images")
label_file = mjsynth_root.join("imlist.txt")
labels = [
"./mjsynth/images/12_I_34.jpg\n",
"./mjsynth/images/12_am_34.jpg\n",
"./mjsynth/images/12_a_34.jpg\n",
"./mjsynth/images/12_Jedi_34.jpg\n",
"./mjsynth/images/12_!_34.jpg\n",
]
with open(label_file, "w") as f:
for label in labels:
f.write(label)
file = BytesIO(mock_image_stream)
for i in ["I", "am", "a", "Jedi", "!"]:
fn = image_folder.join(f"12_{i}_34.jpg")
with open(fn, "wb") as f:
f.write(file.getbuffer())
return str(root), str(label_file)
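# The MJSynth filename convention embeds the transcription between underscores
# ("12_I_34.jpg" -> "I"). A tiny parser for that convention, inferred from the mock
# filenames above (a sketch based on an assumption, not doctr's actual loader):
def _label_from_mjsynth_name(fname):
    # "12_I_34.jpg" -> ["12", "I", "34.jpg"] -> "I"
    return fname.split("_")[1]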
|
import os
from doctr.io import DocumentFile
from doctr.models.artefacts import BarCodeDetector, FaceDetector
def test_qr_code_detector(mock_image_folder):
detector = BarCodeDetector()
for img in os.listdir(mock_image_folder):
image = DocumentFile.from_images(os.path.join(mock_image_folder, img))[0]
barcode = detector(image)
assert len(barcode) == 0
def test_face_detector(mock_image_folder):
detector = FaceDetector(n_faces=1)
for img in os.listdir(mock_image_folder):
image = DocumentFile.from_images(os.path.join(mock_image_folder, img))[0]
faces = detector(image)
assert len(faces) <= 1
|
from PIL.ImageFont import FreeTypeFont, ImageFont
from doctr.utils.fonts import get_font
def test_get_font():
# Attempts to load recommended OS font
font = get_font()
assert isinstance(font, (ImageFont, FreeTypeFont))
|
import numpy as np
import pytest
from doctr.file_utils import CLASS_NAME
from doctr.io import Document
from doctr.io.elements import KIEDocument
from doctr.models import builder
words_per_page = 10
boxes_1 = {CLASS_NAME: np.random.rand(words_per_page, 6)} # dict format
boxes_1[CLASS_NAME][:, :2] *= boxes_1[CLASS_NAME][:, 2:4]  # ensure xmin <= xmax and ymin <= ymax
boxes_2 = np.random.rand(words_per_page, 6) # array format
boxes_2[:, :2] *= boxes_2[:, 2:4]  # ensure xmin <= xmax and ymin <= ymax
def test_documentbuilder():
num_pages = 2
# Don't resolve lines
doc_builder = builder.DocumentBuilder(resolve_lines=False, resolve_blocks=False)
boxes = np.random.rand(words_per_page, 6) # array format
    boxes[:, :2] *= boxes[:, 2:4]  # ensure xmin <= xmax and ymin <= ymax
# Arg consistency check
with pytest.raises(ValueError):
doc_builder([boxes, boxes], [("hello", 1.0)] * 3, [(100, 200), (100, 200)])
out = doc_builder([boxes, boxes], [[("hello", 1.0)] * words_per_page] * num_pages, [(100, 200), (100, 200)])
assert isinstance(out, Document)
assert len(out.pages) == num_pages
# 1 Block & 1 line per page
assert len(out.pages[0].blocks) == 1 and len(out.pages[0].blocks[0].lines) == 1
assert len(out.pages[0].blocks[0].lines[0].words) == words_per_page
# Resolve lines
doc_builder = builder.DocumentBuilder(resolve_lines=True, resolve_blocks=True)
out = doc_builder([boxes, boxes], [[("hello", 1.0)] * words_per_page] * num_pages, [(100, 200), (100, 200)])
# No detection
boxes = np.zeros((0, 5))
out = doc_builder([boxes, boxes], [[], []], [(100, 200), (100, 200)])
assert len(out.pages[0].blocks) == 0
# Rotated boxes to export as straight boxes
boxes = np.array(
[
[[0.1, 0.1], [0.2, 0.2], [0.15, 0.25], [0.05, 0.15]],
[[0.5, 0.5], [0.6, 0.6], [0.55, 0.65], [0.45, 0.55]],
]
)
doc_builder_2 = builder.DocumentBuilder(resolve_blocks=False, resolve_lines=False, export_as_straight_boxes=True)
out = doc_builder_2([boxes], [[("hello", 0.99), ("word", 0.99)]], [(100, 100)])
assert out.pages[0].blocks[0].lines[0].words[-1].geometry == ((0.45, 0.5), (0.6, 0.65))
# Repr
assert (
repr(doc_builder) == "DocumentBuilder(resolve_lines=True, "
"resolve_blocks=True, paragraph_break=0.035, export_as_straight_boxes=False)"
)
def test_kiedocumentbuilder():
num_pages = 2
# Don't resolve lines
doc_builder = builder.KIEDocumentBuilder(resolve_lines=False, resolve_blocks=False)
predictions = {CLASS_NAME: np.random.rand(words_per_page, 6)} # dict format
    predictions[CLASS_NAME][:, :2] *= predictions[CLASS_NAME][:, 2:4]  # ensure xmin <= xmax and ymin <= ymax
# Arg consistency check
with pytest.raises(ValueError):
doc_builder([predictions, predictions], [{CLASS_NAME: ("hello", 1.0)}] * 3, [(100, 200), (100, 200)])
out = doc_builder(
[predictions, predictions],
[{CLASS_NAME: [("hello", 1.0)] * words_per_page}] * num_pages,
[(100, 200), (100, 200)],
)
assert isinstance(out, KIEDocument)
assert len(out.pages) == num_pages
# 1 Block & 1 line per page
assert len(out.pages[0].predictions) == 1
assert len(out.pages[0].predictions[CLASS_NAME]) == words_per_page
# Resolve lines
doc_builder = builder.KIEDocumentBuilder(resolve_lines=True, resolve_blocks=True)
out = doc_builder(
[predictions, predictions],
[{CLASS_NAME: [("hello", 1.0)] * words_per_page}] * num_pages,
[(100, 200), (100, 200)],
)
# No detection
predictions = {CLASS_NAME: np.zeros((0, 5))}
out = doc_builder([predictions, predictions], [{CLASS_NAME: []}, {CLASS_NAME: []}], [(100, 200), (100, 200)])
assert len(out.pages[0].predictions[CLASS_NAME]) == 0
# Rotated boxes to export as straight boxes
predictions = {
CLASS_NAME: np.array(
[
[[0.1, 0.1], [0.2, 0.2], [0.15, 0.25], [0.05, 0.15]],
[[0.5, 0.5], [0.6, 0.6], [0.55, 0.65], [0.45, 0.55]],
]
)
}
doc_builder_2 = builder.KIEDocumentBuilder(resolve_blocks=False, resolve_lines=False, export_as_straight_boxes=True)
out = doc_builder_2([predictions], [{CLASS_NAME: [("hello", 0.99), ("word", 0.99)]}], [(100, 100)])
assert out.pages[0].predictions[CLASS_NAME][0].geometry == ((0.05, 0.1), (0.2, 0.25))
assert out.pages[0].predictions[CLASS_NAME][1].geometry == ((0.45, 0.5), (0.6, 0.65))
# Repr
assert (
repr(doc_builder) == "KIEDocumentBuilder(resolve_lines=True, "
"resolve_blocks=True, paragraph_break=0.035, export_as_straight_boxes=False)"
)
@pytest.mark.parametrize(
"input_boxes, sorted_idxs",
[
[[[0, 0.5, 0.1, 0.6], [0, 0.3, 0.2, 0.4], [0, 0, 0.1, 0.1]], [2, 1, 0]], # vertical
[[[0.7, 0.5, 0.85, 0.6], [0.2, 0.3, 0.4, 0.4], [0, 0, 0.1, 0.1]], [2, 1, 0]], # diagonal
[[[0, 0.5, 0.1, 0.6], [0.15, 0.5, 0.25, 0.6], [0.5, 0.5, 0.6, 0.6]], [0, 1, 2]], # same line, 2p
[[[0, 0.5, 0.1, 0.6], [0.2, 0.49, 0.35, 0.59], [0.8, 0.52, 0.9, 0.63]], [0, 1, 2]], # ~same line
[[[0, 0.3, 0.4, 0.45], [0.5, 0.28, 0.75, 0.42], [0, 0.45, 0.1, 0.55]], [0, 1, 2]], # 2 lines
[[[0, 0.3, 0.4, 0.35], [0.75, 0.28, 0.95, 0.42], [0, 0.45, 0.1, 0.55]], [0, 1, 2]], # 2 lines
[
[
[[0.1, 0.1], [0.2, 0.2], [0.15, 0.25], [0.05, 0.15]],
[[0.5, 0.5], [0.6, 0.6], [0.55, 0.65], [0.45, 0.55]],
],
[0, 1],
], # rot
],
)
def test_sort_boxes(input_boxes, sorted_idxs):
doc_builder = builder.DocumentBuilder()
assert doc_builder._sort_boxes(np.asarray(input_boxes))[0].tolist() == sorted_idxs
@pytest.mark.parametrize(
"input_boxes, lines",
[
[[[0, 0.5, 0.1, 0.6], [0, 0.3, 0.2, 0.4], [0, 0, 0.1, 0.1]], [[2], [1], [0]]], # vertical
[[[0.7, 0.5, 0.85, 0.6], [0.2, 0.3, 0.4, 0.4], [0, 0, 0.1, 0.1]], [[2], [1], [0]]], # diagonal
[[[0, 0.5, 0.14, 0.6], [0.15, 0.5, 0.25, 0.6], [0.5, 0.5, 0.6, 0.6]], [[0, 1], [2]]], # same line, 2p
[[[0, 0.5, 0.18, 0.6], [0.2, 0.48, 0.35, 0.58], [0.8, 0.52, 0.9, 0.63]], [[0, 1], [2]]], # ~same line
[[[0, 0.3, 0.48, 0.45], [0.5, 0.28, 0.75, 0.42], [0, 0.45, 0.1, 0.55]], [[0, 1], [2]]], # 2 lines
[[[0, 0.3, 0.4, 0.35], [0.75, 0.28, 0.95, 0.42], [0, 0.45, 0.1, 0.55]], [[0], [1], [2]]], # 2 lines
[
[
[[0.1, 0.1], [0.2, 0.2], [0.15, 0.25], [0.05, 0.15]],
[[0.5, 0.5], [0.6, 0.6], [0.55, 0.65], [0.45, 0.55]],
],
[[0], [1]],
], # rot
],
)
def test_resolve_lines(input_boxes, lines):
doc_builder = builder.DocumentBuilder()
assert doc_builder._resolve_lines(np.asarray(input_boxes)) == lines
|
from copy import deepcopy
from math import hypot
import numpy as np
import pytest
from doctr.io import DocumentFile
from doctr.utils import geometry
def test_bbox_to_polygon():
assert geometry.bbox_to_polygon(((0, 0), (1, 1))) == ((0, 0), (1, 0), (0, 1), (1, 1))
def test_polygon_to_bbox():
assert geometry.polygon_to_bbox(((0, 0), (1, 0), (0, 1), (1, 1))) == ((0, 0), (1, 1))
def test_resolve_enclosing_bbox():
assert geometry.resolve_enclosing_bbox([((0, 0.5), (1, 0)), ((0.5, 0), (1, 0.25))]) == ((0, 0), (1, 0.5))
pred = geometry.resolve_enclosing_bbox(np.array([[0.1, 0.1, 0.2, 0.2, 0.9], [0.15, 0.15, 0.2, 0.2, 0.8]]))
    assert np.allclose(pred, np.array([0.1, 0.1, 0.2, 0.2, 0.85]))  # enclosing box with averaged confidence
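# Reference sketch of the behavior checked above (an assumption, not doctr's code):
# the enclosing bbox takes the elementwise min of top-left corners and max of
# bottom-right corners; for 5-column inputs the confidences are averaged.
def _reference_enclosing_bbox(boxes):
    # boxes: (N, 4) or (N, 5) array of (xmin, ymin, xmax, ymax[, conf]) rows
    top_left = boxes[:, :2].min(axis=0)
    bot_right = boxes[:, 2:4].max(axis=0)
    if boxes.shape[1] == 5:
        return np.concatenate([top_left, bot_right, [boxes[:, 4].mean()]])
    return tuple(top_left), tuple(bot_right)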
def test_resolve_enclosing_rbbox():
pred = geometry.resolve_enclosing_rbbox(
[
np.asarray([[0.1, 0.1], [0.2, 0.2], [0.15, 0.25], [0.05, 0.15]]),
np.asarray([[0.5, 0.5], [0.6, 0.6], [0.55, 0.65], [0.45, 0.55]]),
]
)
target1 = np.asarray([[0.55, 0.65], [0.05, 0.15], [0.1, 0.1], [0.6, 0.6]])
target2 = np.asarray([[0.05, 0.15], [0.1, 0.1], [0.6, 0.6], [0.55, 0.65]])
    assert np.all(np.abs(target1 - pred) <= 1e-3) or np.all(np.abs(target2 - pred) <= 1e-3)
def test_remap_boxes():
pred = geometry.remap_boxes(
np.asarray([[[0.25, 0.25], [0.25, 0.75], [0.75, 0.25], [0.75, 0.75]]]), (10, 10), (20, 20)
)
target = np.asarray([[[0.375, 0.375], [0.375, 0.625], [0.625, 0.375], [0.625, 0.625]]])
assert np.all(pred == target)
pred = geometry.remap_boxes(
np.asarray([[[0.25, 0.25], [0.25, 0.75], [0.75, 0.25], [0.75, 0.75]]]), (10, 10), (20, 10)
)
target = np.asarray([[[0.25, 0.375], [0.25, 0.625], [0.75, 0.375], [0.75, 0.625]]])
assert np.all(pred == target)
with pytest.raises(ValueError):
geometry.remap_boxes(
np.asarray([[[0.25, 0.25], [0.25, 0.75], [0.75, 0.25], [0.75, 0.75]]]), (80, 40, 150), (160, 40)
)
with pytest.raises(ValueError):
geometry.remap_boxes(np.asarray([[[0.25, 0.25], [0.25, 0.75], [0.75, 0.25], [0.75, 0.75]]]), (80, 40), (160,))
orig_dimension = (100, 100)
dest_dimensions = (200, 100)
# Unpack dimensions
height_o, width_o = orig_dimension
height_d, width_d = dest_dimensions
orig_box = np.asarray([[[0.25, 0.25], [0.25, 0.25], [0.75, 0.75], [0.75, 0.75]]])
pred = geometry.remap_boxes(orig_box, orig_dimension, dest_dimensions)
# Switch to absolute coords
orig = np.stack((orig_box[:, :, 0] * width_o, orig_box[:, :, 1] * height_o), axis=2)[0]
dest = np.stack((pred[:, :, 0] * width_d, pred[:, :, 1] * height_d), axis=2)[0]
len_orig = hypot(orig[0][0] - orig[2][0], orig[0][1] - orig[2][1])
len_dest = hypot(dest[0][0] - dest[2][0], dest[0][1] - dest[2][1])
assert len_orig == len_dest
alpha_orig = np.rad2deg(np.arctan((orig[0][1] - orig[2][1]) / (orig[0][0] - orig[2][0])))
alpha_dest = np.rad2deg(np.arctan((dest[0][1] - dest[2][1]) / (dest[0][0] - dest[2][0])))
assert alpha_orig == alpha_dest
def test_rotate_boxes():
boxes = np.array([[0.1, 0.1, 0.8, 0.3, 0.5]])
rboxes = np.array([[0.1, 0.1], [0.8, 0.1], [0.8, 0.3], [0.1, 0.3]])
# Angle = 0
rotated = geometry.rotate_boxes(boxes, angle=0.0, orig_shape=(1, 1))
assert np.all(rotated == rboxes)
# Angle < 1:
rotated = geometry.rotate_boxes(boxes, angle=0.5, orig_shape=(1, 1))
assert np.all(rotated == rboxes)
# Angle = 30
rotated = geometry.rotate_boxes(boxes, angle=30, orig_shape=(1, 1))
assert rotated.shape == (1, 4, 2)
boxes = np.array([[0.0, 0.0, 0.6, 0.2, 0.5]])
# Angle = -90:
rotated = geometry.rotate_boxes(boxes, angle=-90, orig_shape=(1, 1), min_angle=0)
assert np.allclose(rotated, np.array([[[1, 0.0], [1, 0.6], [0.8, 0.6], [0.8, 0.0]]]))
# Angle = 90
rotated = geometry.rotate_boxes(boxes, angle=+90, orig_shape=(1, 1), min_angle=0)
assert np.allclose(rotated, np.array([[[0, 1.0], [0, 0.4], [0.2, 0.4], [0.2, 1.0]]]))
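# Minimal sketch of rotating relative corners around the image center with a plain
# 2D rotation matrix (a generic sketch; doctr's own sign convention for a y-down
# image axis may be the opposite):
def _rotate_rel_points(points, angle):
    # points: (N, 2) array of relative (x, y) coordinates, rotated around (0.5, 0.5)
    theta = np.deg2rad(angle)
    rot = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
    return (points - 0.5) @ rot.T + 0.5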
def test_rotate_image():
img = np.ones((32, 64, 3), dtype=np.float32)
rotated = geometry.rotate_image(img, 30.0)
assert rotated.shape[:-1] == (32, 64)
assert rotated[0, 0, 0] == 0
assert rotated[0, :, 0].sum() > 1
# Expand
rotated = geometry.rotate_image(img, 30.0, expand=True)
assert rotated.shape[:-1] == (60, 120)
assert rotated[0, :, 0].sum() <= 1
# Expand
rotated = geometry.rotate_image(img, 30.0, expand=True, preserve_origin_shape=True)
assert rotated.shape[:-1] == (32, 64)
assert rotated[0, :, 0].sum() <= 1
# Expand with 90° rotation
rotated = geometry.rotate_image(img, 90.0, expand=True)
assert rotated.shape[:-1] == (64, 128)
assert rotated[0, :, 0].sum() <= 1
@pytest.mark.parametrize(
"abs_geoms, img_size, rel_geoms",
[
# Full image (boxes)
[np.array([[0, 0, 32, 32]]), (32, 32), np.array([[0, 0, 1, 1]], dtype=np.float32)],
# Full image (polygons)
[
np.array([[[0, 0], [32, 0], [32, 32], [0, 32]]]),
(32, 32),
np.array([[[0, 0], [1, 0], [1, 1], [0, 1]]], dtype=np.float32),
],
# Quarter image (boxes)
[np.array([[0, 0, 16, 16]]), (32, 32), np.array([[0, 0, 0.5, 0.5]], dtype=np.float32)],
# Quarter image (polygons)
[
np.array([[[0, 0], [16, 0], [16, 16], [0, 16]]]),
(32, 32),
np.array([[[0, 0], [0.5, 0], [0.5, 0.5], [0, 0.5]]], dtype=np.float32),
],
],
)
def test_convert_to_relative_coords(abs_geoms, img_size, rel_geoms):
assert np.all(geometry.convert_to_relative_coords(abs_geoms, img_size) == rel_geoms)
# Wrong format
with pytest.raises(ValueError):
geometry.convert_to_relative_coords(np.zeros((3, 5)), (32, 32))
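# Reference computation for the cases above (assuming img_size is (height, width)):
# relative coords are absolute pixel coords divided by width in x and height in y,
# for both (N, 4) boxes and (N, 4, 2) polygons.
def _to_relative(abs_geoms, img_size):
    height, width = img_size
    scale = np.array([width, height], dtype=np.float32)
    num_geoms = abs_geoms.shape[0]
    return (abs_geoms.reshape(num_geoms, -1, 2) / scale).reshape(abs_geoms.shape).astype(np.float32)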
def test_estimate_page_angle():
straight_polys = np.array(
[
[[0.3, 0.3], [0.4, 0.3], [0.4, 0.4], [0.3, 0.4]],
[[0.4, 0.4], [0.5, 0.4], [0.5, 0.5], [0.4, 0.5]],
[[0.5, 0.5], [0.6, 0.5], [0.6, 0.6], [0.5, 0.6]],
]
)
rotated_polys = geometry.rotate_boxes(straight_polys, angle=20, orig_shape=(512, 512))
angle = geometry.estimate_page_angle(rotated_polys)
assert np.isclose(angle, 20)
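# Sketch of one way to estimate a page angle from word polygons (an assumption, not
# necessarily doctr's method): measure the signed angle of each polygon's top edge
# against the horizontal and average (sign conventions for a y-down image vary).
def _mean_top_edge_angle(polys):
    # polys: (N, 4, 2) with corners ordered (tl, tr, br, bl)
    dx = polys[:, 1, 0] - polys[:, 0, 0]
    dy = polys[:, 1, 1] - polys[:, 0, 1]
    return float(np.mean(np.rad2deg(np.arctan2(-dy, dx))))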
def test_extract_crops(mock_pdf): # noqa: F811
doc_img = DocumentFile.from_pdf(mock_pdf)[0]
num_crops = 2
rel_boxes = np.array(
[[idx / num_crops, idx / num_crops, (idx + 1) / num_crops, (idx + 1) / num_crops] for idx in range(num_crops)],
dtype=np.float32,
)
abs_boxes = np.array(
[
[
int(idx * doc_img.shape[1] / num_crops),
                int(idx * doc_img.shape[0] / num_crops),
int((idx + 1) * doc_img.shape[1] / num_crops),
int((idx + 1) * doc_img.shape[0] / num_crops),
]
for idx in range(num_crops)
],
dtype=np.float32,
)
with pytest.raises(AssertionError):
geometry.extract_crops(doc_img, np.zeros((1, 5)))
for boxes in (rel_boxes, abs_boxes):
croped_imgs = geometry.extract_crops(doc_img, boxes)
# Number of crops
assert len(croped_imgs) == num_crops
# Data type and shape
assert all(isinstance(crop, np.ndarray) for crop in croped_imgs)
assert all(crop.ndim == 3 for crop in croped_imgs)
# Identity
assert np.all(
doc_img == geometry.extract_crops(doc_img, np.array([[0, 0, 1, 1]], dtype=np.float32), channels_last=True)[0]
)
torch_img = np.transpose(doc_img, axes=(-1, 0, 1))
assert np.all(
torch_img
== np.transpose(
geometry.extract_crops(doc_img, np.array([[0, 0, 1, 1]], dtype=np.float32), channels_last=False)[0],
axes=(-1, 0, 1),
)
)
# No box
assert geometry.extract_crops(doc_img, np.zeros((0, 4))) == []
def test_extract_rcrops(mock_pdf): # noqa: F811
doc_img = DocumentFile.from_pdf(mock_pdf)[0]
num_crops = 2
rel_boxes = np.array(
[
[
[idx / num_crops, idx / num_crops],
[idx / num_crops + 0.1, idx / num_crops],
[idx / num_crops + 0.1, idx / num_crops + 0.1],
                [idx / num_crops, idx / num_crops + 0.1],
]
for idx in range(num_crops)
],
dtype=np.float32,
)
abs_boxes = deepcopy(rel_boxes)
abs_boxes[:, :, 0] *= doc_img.shape[1]
abs_boxes[:, :, 1] *= doc_img.shape[0]
    abs_boxes = abs_boxes.astype(np.int32)  # np.int was removed in NumPy 1.24
with pytest.raises(AssertionError):
geometry.extract_rcrops(doc_img, np.zeros((1, 8)))
for boxes in (rel_boxes, abs_boxes):
croped_imgs = geometry.extract_rcrops(doc_img, boxes)
# Number of crops
assert len(croped_imgs) == num_crops
# Data type and shape
assert all(isinstance(crop, np.ndarray) for crop in croped_imgs)
assert all(crop.ndim == 3 for crop in croped_imgs)
# No box
assert geometry.extract_rcrops(doc_img, np.zeros((0, 4, 2))) == []
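# Minimal sketch of straight-box cropping (an assumption, not doctr's code):
# boxes whose values all fall in [0, 1] are treated as relative and scaled to
# pixels, then the array is sliced directly. The value-range discriminator is one
# plausible choice, since both relative and absolute boxes above can be floats.
def _crop_straight(img, box):
    xmin, ymin, xmax, ymax = box
    if max(box) <= 1:  # relative coords
        height, width = img.shape[:2]
        xmin, xmax = xmin * width, xmax * width
        ymin, ymax = ymin * height, ymax * height
    return img[int(ymin): int(ymax), int(xmin): int(xmax)]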
|
import numpy as np
import pytest
from test_io_elements import _mock_pages
from doctr.utils import visualization
def test_visualize_page():
pages = _mock_pages()
image = np.ones((300, 200, 3))
visualization.visualize_page(pages[0].export(), image, words_only=False)
visualization.visualize_page(pages[0].export(), image, words_only=True, interactive=False)
# geometry checks
with pytest.raises(ValueError):
visualization.create_obj_patch([1, 2], (100, 100))
with pytest.raises(ValueError):
visualization.create_obj_patch((1, 2), (100, 100))
with pytest.raises(ValueError):
visualization.create_obj_patch((1, 2, 3, 4, 5), (100, 100))
def test_synthesize_page():
pages = _mock_pages()
visualization.synthesize_page(pages[0].export(), draw_proba=False)
render = visualization.synthesize_page(pages[0].export(), draw_proba=True)
assert isinstance(render, np.ndarray)
assert render.shape == (*pages[0].dimensions, 3)
def test_draw_boxes():
image = np.ones((256, 256, 3), dtype=np.float32)
boxes = [
[0.1, 0.1, 0.2, 0.2],
        [0.15, 0.15, 0.19, 0.2],  # overlaps the previous box
        [0.5, 0.5, 0.6, 0.55],
        [0.55, 0.5, 0.7, 0.55],  # overlaps the previous box
]
visualization.draw_boxes(boxes=np.array(boxes), image=image, block=False)
|
import numpy as np
import pytest
from doctr.datasets import utils
@pytest.mark.parametrize(
"input_str, vocab, output_str",
[
["f orêt", "latin", "foret"],
["f or êt", "french", "forêt"],
["¢¾©téØßřůž", "french", "¢■■té■■ruz"],
["Ûæëð", "french", "Û■ë■"],
["Ûæë<àð", "latin", "U■e<a■"],
["Ûm@læ5€ëð", "currency", "■■■■■■€■■"],
["Ûtë3p2ð", "digits", "■■■3■2■"],
],
)
def test_translate(input_str, vocab, output_str):
out = utils.translate(input_str, vocab, unknown_char="■")
assert out == output_str
@pytest.mark.parametrize(
"input_str",
[
"frtvorêt",
"for98€t",
"uéîUY",
"ÛAZ$£ë",
],
)
def test_encode_decode(input_str):
mapping = """3K}7eé;5àÎYho]QwV6qU~W"XnbBvcADfËmy.9ÔpÛ*{CôïE%M4#ÈR:g@T$x?0î£|
za1ù8,OG€P-kçHëÀÂ2É/ûIJ\'j(LNÙFut[)èZs+&°Sd=Ï!<â_Ç>rêi`l"""
encoded = utils.encode_string(input_str, mapping)
decoded = utils.decode_sequence(encoded, mapping)
assert decoded == input_str
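# Conceptual sketch of the round-trip under test (not the library implementation):
# encoding maps each character to its index in the vocab string, decoding maps back.
def _encode(text, vocab):
    return [vocab.index(char) for char in text]
def _decode(indices, vocab):
    return "".join(vocab[idx] for idx in indices)
# e.g. _decode(_encode("abc", "abc"), "abc") == "abc"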
def test_decode_sequence():
mapping = "abcdef"
with pytest.raises(TypeError):
utils.decode_sequence(123, mapping)
with pytest.raises(AssertionError):
utils.decode_sequence(np.array([2, 10]), mapping)
with pytest.raises(AssertionError):
utils.decode_sequence(np.array([2, 4.5]), mapping)
assert utils.decode_sequence([3, 4, 3, 4], mapping) == "dede"
@pytest.mark.parametrize(
"sequences, vocab, target_size, sos, eos, pad, dynamic_len, error, out_shape, gts",
[
[["cba"], "abcdef", None, None, 1, None, False, True, (1, 3), [[2, 1, 0]]], # eos in vocab
[["cba", "a"], "abcdef", None, None, -1, None, False, False, (2, 4), [[2, 1, 0, -1], [0, -1, -1, -1]]],
[["cba", "a"], "abcdef", None, None, 6, None, False, False, (2, 4), [[2, 1, 0, 6], [0, 6, 6, 6]]],
[["cba", "a"], "abcdef", 2, None, -1, None, False, False, (2, 2), [[2, 1], [0, -1]]],
[["cba", "a"], "abcdef", 4, None, -1, None, False, False, (2, 4), [[2, 1, 0, -1], [0, -1, -1, -1]]],
[["cba", "a"], "abcdef", 5, 7, -1, None, False, False, (2, 5), [[7, 2, 1, 0, -1], [7, 0, -1, -1, -1]]],
[["cba", "a"], "abcdef", 6, 7, -1, None, True, False, (2, 5), [[7, 2, 1, 0, -1], [7, 0, -1, -1, -1]]],
[["cba", "a"], "abcdef", None, 7, -1, 9, False, False, (2, 6), [[7, 2, 1, 0, -1, 9], [7, 0, -1, 9, 9, 9]]],
],
)
def test_encode_sequences(sequences, vocab, target_size, sos, eos, pad, dynamic_len, error, out_shape, gts):
if error:
with pytest.raises(ValueError):
utils.encode_sequences(sequences, vocab, target_size, eos, sos, pad, dynamic_len)
else:
out = utils.encode_sequences(sequences, vocab, target_size, eos, sos, pad, dynamic_len)
assert isinstance(out, np.ndarray)
assert out.shape == out_shape
        assert np.all(out == np.asarray(gts)), f"{out} != {gts}"
# NOTE: main test in test_utils_geometry.py
@pytest.mark.parametrize(
"target",
[
# Boxes
{"boxes": np.random.rand(3, 4), "labels": ["a", "b", "c"]},
# Polygons
{"boxes": np.random.rand(3, 4, 2), "labels": ["a", "b", "c"]},
],
)
def test_convert_target_to_relative(target, mock_image_stream):
    img = np.array([[3, 32, 128]])  # stand-in for an image tensor; the values are irrelevant here
    back_img, target = utils.convert_target_to_relative(img, target)
    assert np.all(img == back_img)
    assert np.all((target["boxes"] >= 0) & (target["boxes"] <= 1))
# NOTE: main test in test_utils_geometry.py (extract_rcrops, extract_crops)
@pytest.mark.parametrize(
"geoms",
[
# Boxes
np.random.randint(low=1, high=20, size=(3, 4)),
# Polygons
np.random.randint(low=1, high=20, size=(3, 4, 2)),
],
)
def test_crop_bboxes_from_image(geoms, mock_image_path):
num_crops = 3
with pytest.raises(ValueError):
utils.crop_bboxes_from_image(mock_image_path, geoms=np.zeros((3, 1)))
with pytest.raises(FileNotFoundError):
utils.crop_bboxes_from_image("123", geoms=np.zeros((2, 4)))
cropped_imgs = utils.crop_bboxes_from_image(mock_image_path, geoms=geoms)
# Number of crops
assert len(cropped_imgs) == num_crops
# Data type and shape
assert all(isinstance(crop, np.ndarray) for crop in cropped_imgs)
assert all(crop.ndim == 3 for crop in cropped_imgs)
|
from pathlib import Path
import numpy as np
import pytest
from doctr import datasets
def test_visiondataset():
url = "https://data.deepai.org/mnist.zip"
with pytest.raises(ValueError):
datasets.datasets.VisionDataset(url, download=False)
dataset = datasets.datasets.VisionDataset(url, download=True, extract_archive=True)
assert len(dataset) == 0
assert repr(dataset) == "VisionDataset()"
def test_abstractdataset(mock_image_path):
with pytest.raises(ValueError):
datasets.datasets.AbstractDataset("my/fantasy/folder")
# Check transforms
path = Path(mock_image_path)
ds = datasets.datasets.AbstractDataset(path.parent)
# Check target format
with pytest.raises(AssertionError):
ds.data = [(path.name, 0)]
img, target = ds[0]
with pytest.raises(AssertionError):
ds.data = [(path.name, dict(boxes=np.array([[0, 0, 1, 1]])))]
img, target = ds[0]
with pytest.raises(AssertionError):
ds.data = [(ds.data[0][0], {"label": "A"})]
img, target = ds[0]
# Patch some data
ds.data = [(path.name, np.array([0]))]
# Fetch the img
img, target = ds[0]
assert isinstance(target, np.ndarray) and target == np.array([0])
# Check img_transforms
ds.img_transforms = lambda x: 1 - x
img2, target2 = ds[0]
assert np.all(img2.numpy() == 1 - img.numpy())
assert target == target2
# Check sample_transforms
ds.img_transforms = None
ds.sample_transforms = lambda x, y: (x, y + 1)
img3, target3 = ds[0]
assert np.all(img3.numpy() == img.numpy()) and (target3 == (target + 1))
# Check inplace modifications
ds.data = [(ds.data[0][0], "A")]
def inplace_transfo(x, target):
target += "B"
return x, target
    ds.sample_transforms = inplace_transfo
    # Call twice: if the transform mutated the stored target in place, the second call would yield "ABB"
    _, t = ds[0]
    _, t = ds[0]
|
import numpy as np
import pytest
from doctr.models.detection.differentiable_binarization.base import DBPostProcessor
from doctr.models.detection.linknet.base import LinkNetPostProcessor
def test_dbpostprocessor():
postprocessor = DBPostProcessor(assume_straight_pages=True)
r_postprocessor = DBPostProcessor(assume_straight_pages=False)
with pytest.raises(AssertionError):
postprocessor(np.random.rand(2, 512, 512).astype(np.float32))
mock_batch = np.random.rand(2, 512, 512, 1).astype(np.float32)
out = postprocessor(mock_batch)
r_out = r_postprocessor(mock_batch)
# Batch composition
assert isinstance(out, list)
assert len(out) == 2
assert all(isinstance(sample, list) and all(isinstance(v, np.ndarray) for v in sample) for sample in out)
assert all(all(v.shape[1] == 5 for v in sample) for sample in out)
assert all(all(v.shape[1] == 4 and v.shape[2] == 2 for v in sample) for sample in r_out)
# Relative coords
assert all(all(np.all(np.logical_and(v[:, :4] >= 0, v[:, :4] <= 1)) for v in sample) for sample in out)
assert all(all(np.all(np.logical_and(v[:, :4] >= 0, v[:, :4] <= 1)) for v in sample) for sample in r_out)
# Repr
assert repr(postprocessor) == "DBPostProcessor(bin_thresh=0.3, box_thresh=0.1)"
# Edge case when the expanded points of the polygon has two lists
issue_points = np.array(
[
[869, 561],
[923, 581],
[925, 595],
[915, 583],
[889, 583],
[905, 593],
[882, 601],
[901, 595],
[904, 604],
[876, 608],
[915, 614],
[911, 605],
[925, 601],
[930, 616],
[911, 617],
[900, 636],
[931, 637],
[904, 649],
[932, 649],
[932, 628],
[918, 627],
[934, 624],
[935, 573],
[909, 569],
[934, 562],
],
dtype=np.int32,
)
out = postprocessor.polygon_to_box(issue_points)
r_out = r_postprocessor.polygon_to_box(issue_points)
assert isinstance(out, tuple) and len(out) == 4
assert isinstance(r_out, np.ndarray) and r_out.shape == (4, 2)
def test_linknet_postprocessor():
postprocessor = LinkNetPostProcessor()
r_postprocessor = LinkNetPostProcessor(assume_straight_pages=False)
with pytest.raises(AssertionError):
postprocessor(np.random.rand(2, 512, 512).astype(np.float32))
mock_batch = np.random.rand(2, 512, 512, 1).astype(np.float32)
out = postprocessor(mock_batch)
r_out = r_postprocessor(mock_batch)
# Batch composition
assert isinstance(out, list)
assert len(out) == 2
assert all(isinstance(sample, list) and all(isinstance(v, np.ndarray) for v in sample) for sample in out)
assert all(all(v.shape[1] == 5 for v in sample) for sample in out)
assert all(all(v.shape[1] == 4 and v.shape[2] == 2 for v in sample) for sample in r_out)
# Relative coords
assert all(all(np.all(np.logical_and(v[:4] >= 0, v[:4] <= 1)) for v in sample) for sample in out)
|
import doctr
def test_version():
assert len(doctr.__version__.split(".")) == 3
def test_is_tf_available():
assert doctr.is_tf_available()
def test_is_torch_available():
assert not doctr.is_torch_available()
|
import os
from pathlib import PosixPath
from unittest.mock import patch
import pytest
from doctr.utils.data import download_from_url
@patch("doctr.utils.data._urlretrieve")
@patch("pathlib.Path.mkdir")
@patch.dict(os.environ, {"HOME": "/"}, clear=True)
def test_download_from_url(mkdir_mock, urlretrieve_mock):
download_from_url("test_url")
urlretrieve_mock.assert_called_with("test_url", PosixPath("/.cache/doctr/test_url"))
@patch.dict(os.environ, {"DOCTR_CACHE_DIR": "/test"}, clear=True)
@patch("doctr.utils.data._urlretrieve")
@patch("pathlib.Path.mkdir")
def test_download_from_url_customizing_cache_dir(mkdir_mock, urlretrieve_mock):
download_from_url("test_url")
urlretrieve_mock.assert_called_with("test_url", PosixPath("/test/test_url"))
@patch.dict(os.environ, {"HOME": "/"}, clear=True)
@patch("pathlib.Path.mkdir", side_effect=OSError)
@patch("logging.error")
def test_download_from_url_error_creating_directory(logging_mock, mkdir_mock):
with pytest.raises(OSError):
download_from_url("test_url")
    # NOTE: "direcotry" reproduces the library's log message verbatim (here and in the test below)
    logging_mock.assert_called_with(
        "Failed creating cache direcotry at /.cache/doctr."
        " You can change default cache directory using 'DOCTR_CACHE_DIR' environment variable if needed."
    )
@patch.dict(os.environ, {"HOME": "/", "DOCTR_CACHE_DIR": "/test"}, clear=True)
@patch("pathlib.Path.mkdir", side_effect=OSError)
@patch("logging.error")
def test_download_from_url_error_creating_directory_with_env_var(logging_mock, mkdir_mock):
with pytest.raises(OSError):
download_from_url("test_url")
logging_mock.assert_called_with(
"Failed creating cache direcotry at /test using path from 'DOCTR_CACHE_DIR' environment variable."
)
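# Sketch of the cache-dir resolution these mocks pin down (inferred from the tests
# above, not copied from doctr.utils.data): DOCTR_CACHE_DIR overrides the default
# ~/.cache/doctr location.
from pathlib import Path
def _resolve_cache_dir():
    default = Path.home().joinpath(".cache", "doctr")
    return Path(os.environ.get("DOCTR_CACHE_DIR", default))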
|
from io import BytesIO
import numpy as np
import pytest
import requests
from doctr import io
def _check_doc_content(doc_tensors, num_pages):
# 1 doc of 8 pages
assert len(doc_tensors) == num_pages
assert all(isinstance(page, np.ndarray) for page in doc_tensors)
assert all(page.dtype == np.uint8 for page in doc_tensors)
def test_read_pdf(mock_pdf):
doc = io.read_pdf(mock_pdf)
_check_doc_content(doc, 2)
with open(mock_pdf, "rb") as f:
doc = io.read_pdf(f.read())
_check_doc_content(doc, 2)
# Wrong input type
with pytest.raises(TypeError):
_ = io.read_pdf(123)
# Wrong path
with pytest.raises(FileNotFoundError):
_ = io.read_pdf("my_imaginary_file.pdf")
def test_read_img_as_numpy(tmpdir_factory, mock_pdf):
# Wrong input type
with pytest.raises(TypeError):
_ = io.read_img_as_numpy(123)
# Non-existing file
with pytest.raises(FileNotFoundError):
io.read_img_as_numpy("my_imaginary_file.jpg")
# Invalid image
with pytest.raises(ValueError):
io.read_img_as_numpy(str(mock_pdf))
# From path
url = "https://doctr-static.mindee.com/models?id=v0.2.1/Grace_Hopper.jpg&src=0"
file = BytesIO(requests.get(url).content)
tmp_path = str(tmpdir_factory.mktemp("data").join("mock_img_file.jpg"))
with open(tmp_path, "wb") as f:
f.write(file.getbuffer())
# Path & stream
with open(tmp_path, "rb") as f:
page_stream = io.read_img_as_numpy(f.read())
for page in (io.read_img_as_numpy(tmp_path), page_stream):
# Data type
assert isinstance(page, np.ndarray)
assert page.dtype == np.uint8
# Shape
assert page.shape == (606, 517, 3)
# RGB
bgr_page = io.read_img_as_numpy(tmp_path, rgb_output=False)
assert np.all(page == bgr_page[..., ::-1])
# Resize
target_size = (200, 150)
resized_page = io.read_img_as_numpy(tmp_path, target_size)
assert resized_page.shape[:2] == target_size
def test_read_html():
url = "https://www.google.com"
pdf_stream = io.read_html(url)
assert isinstance(pdf_stream, bytes)
def test_document_file(mock_pdf, mock_image_stream):
pages = io.DocumentFile.from_images(mock_image_stream)
_check_doc_content(pages, 1)
assert isinstance(io.DocumentFile.from_pdf(mock_pdf), list)
assert isinstance(io.DocumentFile.from_url("https://www.google.com"), list)
def test_pdf(mock_pdf):
pages = io.DocumentFile.from_pdf(mock_pdf)
# As images
num_pages = 2
_check_doc_content(pages, num_pages)
|
import pytest
from doctr.models.recognition.utils import merge_multi_strings, merge_strings
@pytest.mark.parametrize(
"a, b, merged",
[
["abc", "def", "abcdef"],
["abcd", "def", "abcdef"],
["abcde", "def", "abcdef"],
["abcdef", "def", "abcdef"],
["abcccc", "cccccc", "abcccccccc"],
],
)
def test_merge_strings(a, b, merged):
assert merged == merge_strings(a, b, 1.4)
@pytest.mark.parametrize(
"seq_list, merged",
[
[["abc", "def"], "abcdef"],
[["abcd", "def", "efgh", "ijk"], "abcdefghijk"],
[["abcdi", "defk", "efghi", "aijk"], "abcdefghijk"],
],
)
def test_merge_multi_strings(seq_list, merged):
assert merged == merge_multi_strings(seq_list, 1.4)
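# Naive reference for the overlap merge exercised above (a simplification, not
# doctr's dilation-aware algorithm): join on the longest suffix of `a` that is a
# prefix of `b`. Note it cannot resolve the repeated-character case ("abcccc" +
# "cccccc"), which is why merge_strings also takes a dilation factor.
def _naive_merge(a, b):
    for k in range(min(len(a), len(b)), 0, -1):
        if a.endswith(b[:k]):
            return a + b[k:]
    return a + b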
|
import numpy as np
import pytest
from doctr.utils import metrics
@pytest.mark.parametrize(
"gt, pred, raw, caseless, unidecode, unicase",
[
[["grass", "56", "True", "EUR"], ["grass", "56", "true", "€"], 0.5, 0.75, 0.75, 1],
[["éléphant", "ça"], ["elephant", "ca"], 0, 0, 1, 1],
],
)
def test_text_match(gt, pred, raw, caseless, unidecode, unicase):
metric = metrics.TextMatch()
with pytest.raises(AssertionError):
metric.summary()
with pytest.raises(AssertionError):
metric.update(["a", "b"], ["c"])
metric.update(gt, pred)
assert metric.summary() == dict(raw=raw, caseless=caseless, unidecode=unidecode, unicase=unicase)
metric.reset()
assert metric.raw == metric.caseless == metric.unidecode == metric.unicase == metric.total == 0
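# Sketch of the four match levels asserted above (an assumption mirroring the
# expected values, not doctr's code). The ASCII-folding levels rely on the
# third-party `unidecode` package.
def _match_levels(gt_word, pred_word):
    from unidecode import unidecode
    return dict(
        raw=gt_word == pred_word,
        caseless=gt_word.lower() == pred_word.lower(),
        unidecode=unidecode(gt_word) == unidecode(pred_word),
        unicase=unidecode(gt_word).lower() == unidecode(pred_word).lower(),
    )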
@pytest.mark.parametrize(
"box1, box2, iou, abs_tol",
[
[[[0, 0, 0.5, 0.5]], [[0, 0, 0.5, 0.5]], 1, 0], # Perfect match
[[[0, 0, 0.5, 0.5]], [[0.5, 0.5, 1, 1]], 0, 0], # No match
[[[0, 0, 1, 1]], [[0.5, 0.5, 1, 1]], 0.25, 0], # Partial match
[[[0.2, 0.2, 0.6, 0.6]], [[0.4, 0.4, 0.8, 0.8]], 4 / 28, 1e-7], # Partial match
[[[0, 0, 0.1, 0.1]], [[0.9, 0.9, 1, 1]], 0, 0], # Boxes far from each other
[np.zeros((0, 4)), [[0, 0, 0.5, 0.5]], 0, 0], # Zero-sized inputs
[[[0, 0, 0.5, 0.5]], np.zeros((0, 4)), 0, 0], # Zero-sized inputs
],
)
def test_box_iou(box1, box2, iou, abs_tol):
iou_mat = metrics.box_iou(np.asarray(box1), np.asarray(box2))
assert iou_mat.shape == (len(box1), len(box2))
if iou_mat.size > 0:
assert abs(iou_mat - iou) <= abs_tol
@pytest.mark.parametrize(
"mask1, mask2, iou, abs_tol",
[
[
[[[True, True, False], [True, True, False]]],
[[[True, True, False], [True, True, False]]],
1,
0,
], # Perfect match
[
[[[True, False, False], [False, False, False]]],
[[[True, True, False], [True, True, False]]],
0.25,
0,
], # Partial match
],
)
def test_mask_iou(mask1, mask2, iou, abs_tol):
iou_mat = metrics.mask_iou(np.asarray(mask1), np.asarray(mask2))
assert iou_mat.shape == (len(mask1), len(mask2))
if iou_mat.size > 0:
assert abs(iou_mat - iou) <= abs_tol
# Incompatible spatial shapes
with pytest.raises(AssertionError):
metrics.mask_iou(np.zeros((2, 3, 5), dtype=bool), np.ones((3, 2, 5), dtype=bool))
@pytest.mark.parametrize(
"rbox1, rbox2, iou, abs_tol",
[
[[[[0, 0], [0.5, 0], [0.5, 0.5], [0, 0.5]]], [[[0, 0], [0.5, 0], [0.5, 0.5], [0, 0.5]]], 1, 0], # Perfect match
[[[[0, 0], [0.5, 0], [0.5, 0.5], [0, 0.5]]], [[[0.5, 0.5], [1, 0.5], [1, 1], [0.5, 1]]], 0, 1e-4], # No match
[
[[[0, 0], [1.0, 0], [1.0, 1.0], [0, 1.0]]],
[[[0.5, 0.5], [1, 0.5], [1.0, 1.0], [0.5, 1]]],
0.25,
5e-3,
], # Partial match
[
[[[0.2, 0.2], [0.6, 0.2], [0.6, 0.6], [0.2, 0.6]]],
[[[0.4, 0.4], [0.8, 0.4], [0.8, 0.8], [0.4, 0.8]]],
4 / 28,
7e-3,
], # Partial match
[
[[[0, 0], [0.05, 0], [0.05, 0.05], [0, 0.05]]],
[[[0.5, 0.5], [1, 0.5], [1, 1], [0.5, 1]]],
0,
0,
], # Boxes far from each other
[np.zeros((0, 4, 2)), [[[0, 0], [0.05, 0], [0.05, 0.05], [0, 0.05]]], 0, 0], # Zero-sized inputs
[[[[0, 0], [0.05, 0], [0.05, 0.05], [0, 0.05]]], np.zeros((0, 4, 2)), 0, 0], # Zero-sized inputs
],
)
def test_polygon_iou(rbox1, rbox2, iou, abs_tol):
mask_shape = (256, 256)
iou_mat = metrics.polygon_iou(np.asarray(rbox1), np.asarray(rbox2), mask_shape)
assert iou_mat.shape == (len(rbox1), len(rbox2))
if iou_mat.size > 0:
assert abs(iou_mat - iou) <= abs_tol
# Ensure broadcasting doesn't change the result
iou_matbis = metrics.polygon_iou(np.asarray(rbox1), np.asarray(rbox2), mask_shape, use_broadcasting=False)
    assert np.all(np.abs(iou_mat - iou_matbis) <= 1e-7)
# Incorrect boxes
with pytest.raises(AssertionError):
metrics.polygon_iou(np.zeros((2, 5), dtype=float), np.ones((3, 4), dtype=float), mask_shape)
@pytest.mark.parametrize(
"box, shape, mask",
[
[
[[0, 0], [0.5, 0], [0.5, 0.5], [0, 0.5]],
(2, 2),
[[True, False], [False, False]],
],
],
)
def test_rbox_to_mask(box, shape, mask):
masks = metrics.rbox_to_mask(np.asarray(box)[None, ...], shape)
assert masks.shape == (1, *shape)
assert np.all(masks[0] == np.asarray(mask, dtype=bool))
@pytest.mark.parametrize(
"gts, preds, iou_thresh, recall, precision, mean_iou",
[
[[[[0, 0, 0.5, 0.5]]], [[[0, 0, 0.5, 0.5]]], 0.5, 1, 1, 1], # Perfect match
[[[[0, 0, 1, 1]]], [[[0, 0, 0.5, 0.5], [0.6, 0.6, 0.7, 0.7]]], 0.2, 1, 0.5, 0.13], # Bad match
[[[[0, 0, 1, 1]]], [[[0, 0, 0.5, 0.5], [0.6, 0.6, 0.7, 0.7]]], 0.5, 0, 0, 0.13], # Bad match
[
[[[0, 0, 0.5, 0.5]], [[0, 0, 0.5, 0.5]]],
[[[0, 0, 0.5, 0.5]], None],
0.5,
0.5,
1,
1,
], # No preds on 2nd sample
],
)
def test_localization_confusion(gts, preds, iou_thresh, recall, precision, mean_iou):
metric = metrics.LocalizationConfusion(iou_thresh)
for _gts, _preds in zip(gts, preds):
metric.update(np.asarray(_gts), np.zeros((0, 4)) if _preds is None else np.asarray(_preds))
assert metric.summary() == (recall, precision, mean_iou)
metric.reset()
assert metric.num_gts == metric.num_preds == metric.matches == metric.tot_iou == 0
@pytest.mark.parametrize(
"gts, preds, iou_thresh, recall, precision, mean_iou",
[
[
[[[[0.05, 0.05], [0.15, 0.05], [0.15, 0.15], [0.05, 0.15]]]],
[[[[0.05, 0.05], [0.15, 0.05], [0.15, 0.15], [0.05, 0.15]]]],
0.5,
1,
1,
1,
], # Perfect match
[
[[[[0.1, 0.05], [0.2, 0.05], [0.2, 0.15], [0.1, 0.15]]]],
[[[[0.1, 0.05], [0.3, 0.05], [0.3, 0.15], [0.1, 0.15]], [[0.6, 0.6], [0.8, 0.6], [0.8, 0.8], [0.6, 0.8]]]],
0.2,
1,
0.5,
0.25,
], # Bad match
[
[
[[[0.05, 0.05], [0.15, 0.05], [0.15, 0.15], [0.05, 0.15]]],
            [[[0.25, 0.25], [0.35, 0.25], [0.35, 0.35], [0.25, 0.35]]],
],
[[[[0.05, 0.05], [0.15, 0.05], [0.15, 0.15], [0.05, 0.15]]], None],
0.5,
0.5,
1,
1,
], # Empty
],
)
def test_r_localization_confusion(gts, preds, iou_thresh, recall, precision, mean_iou):
metric = metrics.LocalizationConfusion(iou_thresh, use_polygons=True, mask_shape=(1000, 1000))
for _gts, _preds in zip(gts, preds):
metric.update(np.asarray(_gts), np.zeros((0, 5)) if _preds is None else np.asarray(_preds))
assert metric.summary()[:2] == (recall, precision)
assert abs(metric.summary()[2] - mean_iou) <= 5e-3
metric.reset()
assert metric.num_gts == metric.num_preds == metric.matches == metric.tot_iou == 0
@pytest.mark.parametrize(
"gt_boxes, gt_words, pred_boxes, pred_words, iou_thresh, recall, precision, mean_iou",
[
[ # Perfect match
[[[0, 0, 0.5, 0.5]]],
[["elephant"]],
[[[0, 0, 0.5, 0.5]]],
[["elephant"]],
0.5,
{"raw": 1, "caseless": 1, "unidecode": 1, "unicase": 1},
{"raw": 1, "caseless": 1, "unidecode": 1, "unicase": 1},
1,
],
[ # Bad match
[[[0, 0, 0.5, 0.5]]],
[["elefant"]],
[[[0, 0, 0.5, 0.5]]],
[["elephant"]],
0.5,
{"raw": 0, "caseless": 0, "unidecode": 0, "unicase": 0},
{"raw": 0, "caseless": 0, "unidecode": 0, "unicase": 0},
1,
],
[ # Good match
[[[0, 0, 1, 1]]],
[["EUR"]],
[[[0, 0, 0.5, 0.5], [0.6, 0.6, 0.7, 0.7]]],
[["€", "e"]],
0.2,
{"raw": 0, "caseless": 0, "unidecode": 1, "unicase": 1},
{"raw": 0, "caseless": 0, "unidecode": 0.5, "unicase": 0.5},
0.13,
],
[ # No preds on 2nd sample
[[[0, 0, 0.5, 0.5]], [[0, 0, 0.5, 0.5]]],
[["Elephant"], ["elephant"]],
[[[0, 0, 0.5, 0.5]], None],
[["elephant"], []],
0.5,
{"raw": 0, "caseless": 0.5, "unidecode": 0, "unicase": 0.5},
{"raw": 0, "caseless": 1, "unidecode": 0, "unicase": 1},
1,
],
],
)
def test_ocr_metric(gt_boxes, gt_words, pred_boxes, pred_words, iou_thresh, recall, precision, mean_iou):
metric = metrics.OCRMetric(iou_thresh)
for _gboxes, _gwords, _pboxes, _pwords in zip(gt_boxes, gt_words, pred_boxes, pred_words):
metric.update(
np.asarray(_gboxes), np.zeros((0, 4)) if _pboxes is None else np.asarray(_pboxes), _gwords, _pwords
)
_recall, _precision, _mean_iou = metric.summary()
assert _recall == recall
assert _precision == precision
assert _mean_iou == mean_iou
metric.reset()
assert metric.num_gts == metric.num_preds == metric.tot_iou == 0
assert metric.raw_matches == metric.caseless_matches == metric.unidecode_matches == metric.unicase_matches == 0
# Shape check
with pytest.raises(AssertionError):
metric.update(
np.asarray(_gboxes),
np.zeros((0, 4)),
_gwords,
["I", "have", "a", "bad", "feeling", "about", "this"],
)
@pytest.mark.parametrize(
"gt_boxes, gt_classes, pred_boxes, pred_classes, iou_thresh, recall, precision, mean_iou",
[
[ # Perfect match
[[[0, 0, 0.5, 0.5]]],
[[0]],
[[[0, 0, 0.5, 0.5]]],
[[0]],
0.5,
1,
1,
1,
],
[ # Bad match
[[[0, 0, 0.5, 0.5]]],
[[0]],
[[[0, 0, 0.5, 0.5]]],
[[1]],
0.5,
0,
0,
1,
],
[ # No preds on 2nd sample
[[[0, 0, 0.5, 0.5]], [[0, 0, 0.5, 0.5]]],
[[0], [1]],
[[[0, 0, 0.5, 0.5]], None],
[[0], []],
0.5,
0.5,
1,
1,
],
],
)
def test_detection_metric(gt_boxes, gt_classes, pred_boxes, pred_classes, iou_thresh, recall, precision, mean_iou):
metric = metrics.DetectionMetric(iou_thresh)
for _gboxes, _gclasses, _pboxes, _pclasses in zip(gt_boxes, gt_classes, pred_boxes, pred_classes):
metric.update(
np.asarray(_gboxes),
np.zeros((0, 4)) if _pboxes is None else np.asarray(_pboxes),
np.array(_gclasses, dtype=np.int64),
np.array(_pclasses, dtype=np.int64),
)
_recall, _precision, _mean_iou = metric.summary()
assert _recall == recall
assert _precision == precision
assert _mean_iou == mean_iou
metric.reset()
assert metric.num_gts == metric.num_preds == metric.tot_iou == 0
assert metric.num_matches == 0
# Shape check
with pytest.raises(AssertionError):
metric.update(
np.asarray(_gboxes), np.zeros((0, 4)), np.array(_gclasses, dtype=np.int64), np.array([1, 2], dtype=np.int64)
)
def test_nms():
boxes = [
[0.1, 0.1, 0.2, 0.2, 0.95],
[0.15, 0.15, 0.19, 0.2, 0.90], # to suppress
[0.5, 0.5, 0.6, 0.55, 0.90],
[0.55, 0.5, 0.7, 0.55, 0.85], # to suppress
]
to_keep = metrics.nms(np.asarray(boxes), thresh=0.2)
assert to_keep == [0, 2]
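# Reference sketch of greedy IoU-based NMS matching the expectation above (an
# assumption, not doctr's implementation). Boxes are (xmin, ymin, xmax, ymax, score)
# rows; candidates overlapping a kept box at or above `thresh` are suppressed.
def _reference_nms(boxes, thresh):
    areas = (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])
    order = boxes[:, 4].argsort()[::-1]
    keep = []
    while order.size > 0:
        i, rest = order[0], order[1:]
        keep.append(int(i))
        # IoU of the kept box against the remaining candidates
        xx1 = np.maximum(boxes[i, 0], boxes[rest, 0])
        yy1 = np.maximum(boxes[i, 1], boxes[rest, 1])
        xx2 = np.minimum(boxes[i, 2], boxes[rest, 2])
        yy2 = np.minimum(boxes[i, 3], boxes[rest, 3])
        inter = np.clip(xx2 - xx1, 0, None) * np.clip(yy2 - yy1, 0, None)
        iou = inter / (areas[i] + areas[rest] - inter)
        order = rest[iou < thresh]
    return keep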
def test_box_ioa():
boxes = [
[0.1, 0.1, 0.2, 0.2],
[0.15, 0.15, 0.2, 0.2],
]
mat = metrics.box_ioa(np.array(boxes), np.array(boxes))
assert mat[1, 0] == mat[0, 0] == mat[1, 1] == 1.0
assert abs(mat[0, 1] - 0.25) <= 1e-7
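# IoA normalizes the intersection by the area of the *row* box only, so a box fully
# contained in another scores 1.0 in that direction while the reverse direction
# yields the containment ratio. A sketch of the pairwise formula (an assumption):
def _pairwise_ioa(boxes_a, boxes_b):
    # (N, 4) x (M, 4) -> (N, M)
    xx1 = np.maximum(boxes_a[:, None, 0], boxes_b[None, :, 0])
    yy1 = np.maximum(boxes_a[:, None, 1], boxes_b[None, :, 1])
    xx2 = np.minimum(boxes_a[:, None, 2], boxes_b[None, :, 2])
    yy2 = np.minimum(boxes_a[:, None, 3], boxes_b[None, :, 3])
    inter = np.clip(xx2 - xx1, 0, None) * np.clip(yy2 - yy1, 0, None)
    areas_a = (boxes_a[:, 2] - boxes_a[:, 0]) * (boxes_a[:, 3] - boxes_a[:, 1])
    return inter / areas_a[:, None]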
|
from io import BytesIO
import cv2
import numpy as np
import pytest
import requests
from doctr.io import reader
from doctr.models._utils import estimate_orientation, get_bitmap_angle, get_language, invert_data_structure
from doctr.utils import geometry
@pytest.fixture(scope="function")
def mock_image(tmpdir_factory):
url = "https://doctr-static.mindee.com/models?id=v0.2.1/bitmap30.png&src=0"
file = BytesIO(requests.get(url).content)
tmp_path = str(tmpdir_factory.mktemp("data").join("mock_bitmap.jpg"))
with open(tmp_path, "wb") as f:
f.write(file.getbuffer())
image = reader.read_img_as_numpy(tmp_path)
return image
@pytest.fixture(scope="function")
def mock_bitmap(mock_image):
bitmap = np.squeeze(cv2.cvtColor(mock_image, cv2.COLOR_BGR2GRAY) / 255.0)
return bitmap
def test_get_bitmap_angle(mock_bitmap):
angle = get_bitmap_angle(mock_bitmap)
assert abs(angle - 30.0) < 1.0
def test_estimate_orientation(mock_image, mock_tilted_payslip):
assert estimate_orientation(mock_image * 0) == 0
angle = estimate_orientation(mock_image)
assert abs(angle - 30.0) < 1.0
rotated = geometry.rotate_image(mock_image, -angle)
angle_rotated = estimate_orientation(rotated)
assert abs(angle_rotated) < 1.0
mock_tilted_payslip = reader.read_img_as_numpy(mock_tilted_payslip)
    assert abs(estimate_orientation(mock_tilted_payslip) - 30.0) < 1.0
rotated = geometry.rotate_image(mock_tilted_payslip, -30, expand=True)
angle_rotated = estimate_orientation(rotated)
assert abs(angle_rotated) < 1.0
def test_get_lang():
sentence = "This is a test sentence."
expected_lang = "en"
threshold_prob = 0.99
lang = get_language(sentence)
assert lang[0] == expected_lang
assert lang[1] > threshold_prob
lang = get_language("a")
assert lang[0] == "unknown"
assert lang[1] == 0.0
def test_convert_list_dict():
dic = {"k1": [[0], [0], [0]], "k2": [[1], [1], [1]]}
L = [{"k1": [0], "k2": [1]}, {"k1": [0], "k2": [1]}, {"k1": [0], "k2": [1]}]
converted_dic = invert_data_structure(dic)
converted_list = invert_data_structure(L)
assert converted_dic == L
assert converted_list == dic
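# Sketch of the dict <-> list-of-dicts inversion being tested (an assumption
# mirroring the expected values, not doctr's implementation):
def _invert(data):
    if isinstance(data, dict):
        keys = list(data)
        return [{k: data[k][i] for k in keys} for i in range(len(data[keys[0]]))]
    return {k: [d[k] for d in data] for k in data[0]}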
|
import numpy as np
import pytest
from doctr.transforms import modules as T
from doctr.transforms.functional.base import expand_line
def test_imagetransform():
transfo = T.ImageTransform(lambda x: 1 - x)
assert transfo(0, 1) == (1, 1)
def test_samplecompose():
transfos = [lambda x, y: (1 - x, y), lambda x, y: (x, 2 * y)]
transfo = T.SampleCompose(transfos)
assert transfo(0, 1) == (1, 2)
def test_oneof():
transfos = [lambda x: 1 - x, lambda x: x + 10]
transfo = T.OneOf(transfos)
out = transfo(1)
assert out == 0 or out == 11
def test_randomapply():
transfo = T.RandomApply(lambda x: 1 - x)
out = transfo(1)
assert out == 0 or out == 1
assert repr(transfo).endswith(", p=0.5)")
@pytest.mark.parametrize(
"line",
[
# Horizontal
np.array([[63, 1], [42, 1]]).astype(np.int32),
# Vertical
np.array([[1, 63], [1, 42]]).astype(np.int32),
# Normal
np.array([[1, 63], [12, 42]]).astype(np.int32),
],
)
def test_expand_line(line):
out = expand_line(line, (100, 100))
assert isinstance(out, tuple)
assert all(isinstance(val, (float, int, np.int32, np.float64)) and val >= 0 for val in out)
|
import numpy as np
import pytest
from doctr.models.recognition.predictor._utils import remap_preds, split_crops
@pytest.mark.parametrize(
"crops, max_ratio, target_ratio, dilation, channels_last, num_crops",
[
# No split required
[[np.zeros((32, 128, 3), dtype=np.uint8)], 8, 4, 1.4, True, 1],
[[np.zeros((3, 32, 128), dtype=np.uint8)], 8, 4, 1.4, False, 1],
# Split required
[[np.zeros((32, 1024, 3), dtype=np.uint8)], 8, 6, 1.4, True, 5],
[[np.zeros((3, 32, 1024), dtype=np.uint8)], 8, 6, 1.4, False, 5],
],
)
def test_split_crops(crops, max_ratio, target_ratio, dilation, channels_last, num_crops):
new_crops, crop_map, should_remap = split_crops(crops, max_ratio, target_ratio, dilation, channels_last)
assert len(new_crops) == num_crops
assert len(crop_map) == len(crops)
assert should_remap == (len(crops) != len(new_crops))
@pytest.mark.parametrize(
"preds, crop_map, dilation, pred",
[
# Nothing to remap
[[("hello", 0.5)], [0], 1.4, [("hello", 0.5)]],
# Merge
[[("hellowo", 0.5), ("loworld", 0.6)], [(0, 2)], 1.4, [("helloworld", 0.5)]],
],
)
def test_remap_preds(preds, crop_map, dilation, pred):
preds = remap_preds(preds, crop_map, dilation)
assert len(preds) == len(pred)
assert preds == pred
assert all(isinstance(pred, tuple) for pred in preds)
assert all(isinstance(pred[0], str) and isinstance(pred[1], float) for pred in preds)
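# The crop_map semantics assumed above (a sketch, not the library code): an int
# entry points at a single crop, while a (start, end) tuple marks a run of
# sub-crops whose strings must be merged back into one prediction. A naive remap
# that concatenates instead of overlap-merging:
def _naive_remap(preds, crop_map):
    out = []
    for entry in crop_map:
        if isinstance(entry, int):
            out.append(preds[entry])
        else:
            start, end = entry
            value = "".join(v for v, _ in preds[start:end])  # no overlap handling
            score = min(s for _, s in preds[start:end])
            out.append((value, score))
    return out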
|
import os
from multiprocessing.pool import ThreadPool
from unittest.mock import patch
import pytest
from doctr.utils.multithreading import multithread_exec
@pytest.mark.parametrize(
"input_seq, func, output_seq",
[
[[1, 2, 3], lambda x: 2 * x, [2, 4, 6]],
[[1, 2, 3], lambda x: x**2, [1, 4, 9]],
[
["this is", "show me", "I know"],
lambda x: x + " the way",
["this is the way", "show me the way", "I know the way"],
],
],
)
def test_multithread_exec(input_seq, func, output_seq):
assert list(multithread_exec(func, input_seq)) == output_seq
assert list(multithread_exec(func, input_seq, 0)) == output_seq
@patch.dict(os.environ, {"DOCTR_MULTIPROCESSING_DISABLE": "TRUE"}, clear=True)
def test_multithread_exec_multiprocessing_disable():
with patch.object(ThreadPool, "map") as mock_tp_map:
multithread_exec(lambda x: x, [1, 2])
assert not mock_tp_map.called
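# Sketch of the env-gated dispatch these tests pin down (inferred from the mocks,
# not the library source): fall back to a plain sequential map whenever
# DOCTR_MULTIPROCESSING_DISABLE is set.
def _maybe_threaded_map(func, seq, threads=None):
    if os.environ.get("DOCTR_MULTIPROCESSING_DISABLE", "").upper() == "TRUE":
        return list(map(func, seq))
    with ThreadPool(threads or None) as pool:
        return pool.map(func, seq)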
|
"""Test for python files copyright headers."""
from datetime import datetime
from pathlib import Path
def test_copyright_header():
copyright_header = "".join(
[
f"# Copyright (C) {2021}-{datetime.now().year}, Mindee.\n\n",
"# This program is licensed under the Apache License 2.0.\n",
"# See LICENSE or go to <https://opensource.org/licenses/Apache-2.0> for full license details.\n",
]
)
excluded_files = ["__init__.py", "version.py"]
invalid_files = []
locations = [".github", "api/app", "demo", "docs", "doctr", "references", "scripts"]
for location in locations:
for source_path in Path(__file__).parent.parent.parent.joinpath(location).rglob("*.py"):
if source_path.name not in excluded_files:
source_path_content = source_path.read_text()
if copyright_header not in source_path_content:
invalid_files.append(source_path)
assert len(invalid_files) == 0, f"Invalid copyright header in the following files: {invalid_files}"
|
from xml.etree.ElementTree import ElementTree
import numpy as np
import pytest
from doctr.file_utils import CLASS_NAME
from doctr.io import elements
def _mock_words(size=(1.0, 1.0), offset=(0, 0), confidence=0.9):
return [
elements.Word(
"hello", confidence, ((offset[0], offset[1]), (size[0] / 2 + offset[0], size[1] / 2 + offset[1]))
),
elements.Word(
"world",
confidence,
((size[0] / 2 + offset[0], size[1] / 2 + offset[1]), (size[0] + offset[0], size[1] + offset[1])),
),
]
def _mock_artefacts(size=(1, 1), offset=(0, 0), confidence=0.8):
sub_size = (size[0] / 2, size[1] / 2)
return [
elements.Artefact(
"qr_code", confidence, ((offset[0], offset[1]), (sub_size[0] + offset[0], sub_size[1] + offset[1]))
),
elements.Artefact(
"qr_code",
confidence,
((sub_size[0] + offset[0], sub_size[1] + offset[1]), (size[0] + offset[0], size[1] + offset[1])),
),
]
def _mock_lines(size=(1, 1), offset=(0, 0)):
sub_size = (size[0] / 2, size[1] / 2)
return [
elements.Line(_mock_words(size=sub_size, offset=offset)),
elements.Line(_mock_words(size=sub_size, offset=(offset[0] + sub_size[0], offset[1] + sub_size[1]))),
]
def _mock_prediction(size=(1.0, 1.0), offset=(0, 0), confidence=0.9):
return [
elements.Prediction(
"hello", confidence, ((offset[0], offset[1]), (size[0] / 2 + offset[0], size[1] / 2 + offset[1]))
),
elements.Prediction(
"world",
confidence,
((size[0] / 2 + offset[0], size[1] / 2 + offset[1]), (size[0] + offset[0], size[1] + offset[1])),
),
]
def _mock_blocks(size=(1, 1), offset=(0, 0)):
sub_size = (size[0] / 4, size[1] / 4)
return [
elements.Block(
_mock_lines(size=sub_size, offset=offset),
_mock_artefacts(size=sub_size, offset=(offset[0] + sub_size[0], offset[1] + sub_size[1])),
),
elements.Block(
_mock_lines(size=sub_size, offset=(offset[0] + 2 * sub_size[0], offset[1] + 2 * sub_size[1])),
_mock_artefacts(size=sub_size, offset=(offset[0] + 3 * sub_size[0], offset[1] + 3 * sub_size[1])),
),
]
def _mock_pages(block_size=(1, 1), block_offset=(0, 0)):
return [
elements.Page(
_mock_blocks(block_size, block_offset),
0,
(300, 200),
{"value": 0.0, "confidence": 1.0},
{"value": "EN", "confidence": 0.8},
),
elements.Page(
_mock_blocks(block_size, block_offset),
1,
(500, 1000),
{"value": 0.15, "confidence": 0.8},
{"value": "FR", "confidence": 0.7},
),
]
def _mock_kie_pages(prediction_size=(1, 1), prediction_offset=(0, 0)):
return [
elements.KIEPage(
{CLASS_NAME: _mock_prediction(prediction_size, prediction_offset)},
0,
(300, 200),
{"value": 0.0, "confidence": 1.0},
{"value": "EN", "confidence": 0.8},
),
elements.KIEPage(
{CLASS_NAME: _mock_prediction(prediction_size, prediction_offset)},
1,
(500, 1000),
{"value": 0.15, "confidence": 0.8},
{"value": "FR", "confidence": 0.7},
),
]
def test_element():
with pytest.raises(KeyError):
elements.Element(sub_elements=[1])
def test_word():
word_str = "hello"
conf = 0.8
geom = ((0, 0), (1, 1))
word = elements.Word(word_str, conf, geom)
# Attribute checks
assert word.value == word_str
assert word.confidence == conf
assert word.geometry == geom
# Render
assert word.render() == word_str
# Export
assert word.export() == {"value": word_str, "confidence": conf, "geometry": geom}
# Repr
assert word.__repr__() == f"Word(value='hello', confidence={conf:.2})"
# Class method
state_dict = {"value": "there", "confidence": 0.1, "geometry": ((0, 0), (0.5, 0.5))}
word = elements.Word.from_dict(state_dict)
assert word.export() == state_dict
def test_line():
geom = ((0, 0), (0.5, 0.5))
words = _mock_words(size=geom[1], offset=geom[0])
line = elements.Line(words)
# Attribute checks
assert len(line.words) == len(words)
assert all(isinstance(w, elements.Word) for w in line.words)
assert line.geometry == geom
# Render
assert line.render() == "hello world"
# Export
assert line.export() == {"words": [w.export() for w in words], "geometry": geom}
# Repr
words_str = " " * 4 + ",\n ".join(repr(word) for word in words) + ","
assert line.__repr__() == f"Line(\n (words): [\n{words_str}\n ]\n)"
    # Ensure that the words repr doesn't span several lines when there are none
assert repr(elements.Line([], ((0, 0), (1, 1)))) == "Line(\n (words): []\n)"
# from dict
state_dict = {
"words": [{"value": "there", "confidence": 0.1, "geometry": ((0, 0), (1.0, 1.0))}],
"geometry": ((0, 0), (1.0, 1.0)),
}
line = elements.Line.from_dict(state_dict)
assert line.export() == state_dict
def test_artefact():
artefact_type = "qr_code"
conf = 0.8
geom = ((0, 0), (1, 1))
artefact = elements.Artefact(artefact_type, conf, geom)
# Attribute checks
assert artefact.type == artefact_type
assert artefact.confidence == conf
assert artefact.geometry == geom
# Render
assert artefact.render() == "[QR_CODE]"
# Export
assert artefact.export() == {"type": artefact_type, "confidence": conf, "geometry": geom}
# Repr
assert artefact.__repr__() == f"Artefact(type='{artefact_type}', confidence={conf:.2})"
def test_prediction():
prediction_str = "hello"
conf = 0.8
geom = ((0, 0), (1, 1))
prediction = elements.Prediction(prediction_str, conf, geom)
# Attribute checks
assert prediction.value == prediction_str
assert prediction.confidence == conf
assert prediction.geometry == geom
# Render
assert prediction.render() == prediction_str
# Export
assert prediction.export() == {"value": prediction_str, "confidence": conf, "geometry": geom}
# Repr
assert prediction.__repr__() == f"Prediction(value='hello', confidence={conf:.2}, bounding_box={geom})"
# Class method
state_dict = {"value": "there", "confidence": 0.1, "geometry": ((0, 0), (0.5, 0.5))}
prediction = elements.Prediction.from_dict(state_dict)
assert prediction.export() == state_dict
def test_block():
geom = ((0, 0), (1, 1))
sub_size = (geom[1][0] / 2, geom[1][0] / 2)
lines = _mock_lines(size=sub_size, offset=geom[0])
artefacts = _mock_artefacts(size=sub_size, offset=sub_size)
block = elements.Block(lines, artefacts)
# Attribute checks
assert len(block.lines) == len(lines)
assert len(block.artefacts) == len(artefacts)
assert all(isinstance(w, elements.Line) for w in block.lines)
assert all(isinstance(a, elements.Artefact) for a in block.artefacts)
assert block.geometry == geom
# Render
assert block.render() == "hello world\nhello world"
# Export
assert block.export() == {
"lines": [line.export() for line in lines],
"artefacts": [artefact.export() for artefact in artefacts],
"geometry": geom,
}
def test_page():
page_idx = 0
page_size = (300, 200)
orientation = {"value": 0.0, "confidence": 0.0}
language = {"value": "EN", "confidence": 0.8}
blocks = _mock_blocks()
page = elements.Page(blocks, page_idx, page_size, orientation, language)
# Attribute checks
assert len(page.blocks) == len(blocks)
assert all(isinstance(b, elements.Block) for b in page.blocks)
assert page.page_idx == page_idx
assert page.dimensions == page_size
assert page.orientation == orientation
assert page.language == language
# Render
assert page.render() == "hello world\nhello world\n\nhello world\nhello world"
# Export
assert page.export() == {
"blocks": [b.export() for b in blocks],
"page_idx": page_idx,
"dimensions": page_size,
"orientation": orientation,
"language": language,
}
# Export XML
assert (
isinstance(page.export_as_xml(), tuple)
and isinstance(page.export_as_xml()[0], (bytes, bytearray))
and isinstance(page.export_as_xml()[1], ElementTree)
)
# Repr
assert "\n".join(repr(page).split("\n")[:2]) == f"Page(\n dimensions={repr(page_size)}"
# Show
page.show(np.zeros((256, 256, 3), dtype=np.uint8), block=False)
# Synthesize
img = page.synthesize()
assert isinstance(img, np.ndarray)
assert img.shape == (*page_size, 3)
def test_kiepage():
page_idx = 0
page_size = (300, 200)
orientation = {"value": 0.0, "confidence": 0.0}
language = {"value": "EN", "confidence": 0.8}
predictions = {CLASS_NAME: _mock_prediction()}
kie_page = elements.KIEPage(predictions, page_idx, page_size, orientation, language)
# Attribute checks
assert len(kie_page.predictions) == len(predictions)
assert all(isinstance(b, elements.Prediction) for b in kie_page.predictions[CLASS_NAME])
assert kie_page.page_idx == page_idx
assert kie_page.dimensions == page_size
assert kie_page.orientation == orientation
assert kie_page.language == language
# Render
assert kie_page.render() == "words: hello\n\nwords: world"
# Export
assert kie_page.export() == {
"predictions": {CLASS_NAME: [b.export() for b in predictions[CLASS_NAME]]},
"page_idx": page_idx,
"dimensions": page_size,
"orientation": orientation,
"language": language,
}
# Export XML
assert (
isinstance(kie_page.export_as_xml(), tuple)
and isinstance(kie_page.export_as_xml()[0], (bytes, bytearray))
and isinstance(kie_page.export_as_xml()[1], ElementTree)
)
# Repr
assert "\n".join(repr(kie_page).split("\n")[:2]) == f"KIEPage(\n dimensions={repr(page_size)}"
# Show
kie_page.show(np.zeros((256, 256, 3), dtype=np.uint8), block=False)
# Synthesize
img = kie_page.synthesize()
assert isinstance(img, np.ndarray)
assert img.shape == (*page_size, 3)
def test_document():
pages = _mock_pages()
doc = elements.Document(pages)
# Attribute checks
assert len(doc.pages) == len(pages)
assert all(isinstance(p, elements.Page) for p in doc.pages)
# Render
page_export = "hello world\nhello world\n\nhello world\nhello world"
assert doc.render() == f"{page_export}\n\n\n\n{page_export}"
# Export
assert doc.export() == {"pages": [p.export() for p in pages]}
# Export XML
assert isinstance(doc.export_as_xml(), list) and len(doc.export_as_xml()) == len(pages)
# Show
doc.show([np.zeros((256, 256, 3), dtype=np.uint8) for _ in range(len(pages))], block=False)
# Synthesize
img_list = doc.synthesize()
assert isinstance(img_list, list) and len(img_list) == len(pages)
def test_kie_document():
pages = _mock_kie_pages()
doc = elements.KIEDocument(pages)
# Attribute checks
assert len(doc.pages) == len(pages)
assert all(isinstance(p, elements.KIEPage) for p in doc.pages)
# Render
page_export = "words: hello\n\nwords: world"
assert doc.render() == f"{page_export}\n\n\n\n{page_export}"
# Export
assert doc.export() == {"pages": [p.export() for p in pages]}
# Export XML
assert isinstance(doc.export_as_xml(), list) and len(doc.export_as_xml()) == len(pages)
# Show
doc.show([np.zeros((256, 256, 3), dtype=np.uint8) for _ in range(len(pages))], block=False)
# Synthesize
img_list = doc.synthesize()
assert isinstance(img_list, list) and len(img_list) == len(pages)
|
from doctr.file_utils import is_tf_available
def test_file_utils():
assert is_tf_available()
|
import math
import numpy as np
import pytest
import tensorflow as tf
from doctr import transforms as T
from doctr.transforms.functional import crop_detection, rotate_sample
def test_resize():
output_size = (32, 32)
transfo = T.Resize(output_size)
input_t = tf.cast(tf.fill([64, 64, 3], 1), dtype=tf.float32)
out = transfo(input_t)
assert tf.reduce_all(out == 1)
assert out.shape[:2] == output_size
assert repr(transfo) == f"Resize(output_size={output_size}, method='bilinear')"
transfo = T.Resize(output_size, preserve_aspect_ratio=True)
input_t = tf.cast(tf.fill([32, 64, 3], 1), dtype=tf.float32)
out = transfo(input_t)
assert not tf.reduce_all(out == 1)
# Asymmetric padding (content kept at the top, zero padding at the bottom)
assert tf.reduce_all(out[-1] == 0) and tf.reduce_all(out[0] == 1)
assert out.shape[:2] == output_size
# Symmetric padding
transfo = T.Resize(output_size, preserve_aspect_ratio=True, symmetric_pad=True)
assert repr(transfo) == (
f"Resize(output_size={output_size}, method='bilinear', " f"preserve_aspect_ratio=True, symmetric_pad=True)"
)
out = transfo(input_t)
# Symmetric padding (zeros on both the first and last rows, content centred)
assert tf.reduce_all(out[-1] == 0) and tf.reduce_all(out[0] == 0)
# Inverse aspect ratio
input_t = tf.cast(tf.fill([64, 32, 3], 1), dtype=tf.float32)
out = transfo(input_t)
assert not tf.reduce_all(out == 1)
assert out.shape[:2] == output_size
# FP16
input_t = tf.cast(tf.fill([64, 64, 3], 1), dtype=tf.float16)
out = transfo(input_t)
assert out.dtype == tf.float16
def test_compose():
output_size = (16, 16)
transfo = T.Compose([T.Resize((32, 32)), T.Resize(output_size)])
input_t = tf.random.uniform(shape=[64, 64, 3], minval=0, maxval=1)
out = transfo(input_t)
assert out.shape[:2] == output_size
assert len(repr(transfo).split("\n")) == 6
@pytest.mark.parametrize(
"input_shape",
[
[8, 32, 32, 3],
[32, 32, 3],
[32, 3],
],
)
def test_normalize(input_shape):
mean, std = [0.5, 0.5, 0.5], [0.5, 0.5, 0.5]
transfo = T.Normalize(mean, std)
input_t = tf.cast(tf.fill(input_shape, 1), dtype=tf.float32)
out = transfo(input_t)
assert tf.reduce_all(out == 1)
assert repr(transfo) == f"Normalize(mean={mean}, std={std})"
# FP16
input_t = tf.cast(tf.fill(input_shape, 1), dtype=tf.float16)
out = transfo(input_t)
assert out.dtype == tf.float16
def test_lambdatransformation():
transfo = T.LambdaTransformation(lambda x: x / 2)
input_t = tf.cast(tf.fill([8, 32, 32, 3], 1), dtype=tf.float32)
out = transfo(input_t)
assert tf.reduce_all(out == 0.5)
def test_togray():
transfo = T.ToGray()
r = tf.fill([8, 32, 32, 1], 0.2)
g = tf.fill([8, 32, 32, 1], 0.6)
b = tf.fill([8, 32, 32, 1], 0.7)
input_t = tf.cast(tf.concat([r, g, b], axis=-1), dtype=tf.float32)
out = transfo(input_t)
assert tf.reduce_all(out <= 0.51)
assert tf.reduce_all(out >= 0.49)
# FP16
input_t = tf.cast(tf.concat([r, g, b], axis=-1), dtype=tf.float16)
out = transfo(input_t)
assert out.dtype == tf.float16
@pytest.mark.parametrize(
"rgb_min",
[
0.2,
0.4,
0.6,
],
)
def test_invert_colorize(rgb_min):
transfo = T.ColorInversion(min_val=rgb_min)
input_t = tf.cast(tf.fill([8, 32, 32, 3], 1), dtype=tf.float32)
out = transfo(input_t)
assert tf.reduce_all(out <= 1 - rgb_min + 1e-4)
assert tf.reduce_all(out >= 0)
input_t = tf.cast(tf.fill([8, 32, 32, 3], 255), dtype=tf.uint8)
out = transfo(input_t)
assert tf.reduce_all(out <= int(math.ceil(255 * (1 - rgb_min))))
assert tf.reduce_all(out >= 0)
# FP16
input_t = tf.cast(tf.fill([8, 32, 32, 3], 1), dtype=tf.float16)
out = transfo(input_t)
assert out.dtype == tf.float16
def test_brightness():
transfo = T.RandomBrightness(max_delta=0.1)
input_t = tf.cast(tf.fill([8, 32, 32, 3], 0.5), dtype=tf.float32)
out = transfo(input_t)
assert tf.reduce_all(out >= 0.4)
assert tf.reduce_all(out <= 0.6)
# FP16
input_t = tf.cast(tf.fill([8, 32, 32, 3], 0.5), dtype=tf.float16)
out = transfo(input_t)
assert out.dtype == tf.float16
def test_contrast():
transfo = T.RandomContrast(delta=0.2)
input_t = tf.cast(tf.fill([8, 32, 32, 3], 0.5), dtype=tf.float32)
out = transfo(input_t)
assert tf.reduce_all(out == 0.5)
# FP16
if any(tf.config.list_physical_devices("GPU")):
input_t = tf.cast(tf.fill([8, 32, 32, 3], 0.5), dtype=tf.float16)
out = transfo(input_t)
assert out.dtype == tf.float16
def test_saturation():
transfo = T.RandomSaturation(delta=0.2)
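# Build an RGB image from a constant HSV tensor (h = s = v = 0.5) so the saturation change can be measured back in HSV space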
input_t = tf.cast(tf.fill([8, 32, 32, 3], 0.5), dtype=tf.float32)
input_t = tf.image.hsv_to_rgb(input_t)
out = transfo(input_t)
hsv = tf.image.rgb_to_hsv(out)
assert tf.reduce_all(hsv[:, :, :, 1] >= 0.4)
assert tf.reduce_all(hsv[:, :, :, 1] <= 0.6)
# FP16
if any(tf.config.list_physical_devices("GPU")):
input_t = tf.cast(tf.fill([8, 32, 32, 3], 0.5), dtype=tf.float16)
out = transfo(input_t)
assert out.dtype == tf.float16
def test_hue():
transfo = T.RandomHue(max_delta=0.2)
input_t = tf.cast(tf.fill([8, 32, 32, 3], 0.5), dtype=tf.float32)
input_t = tf.image.hsv_to_rgb(input_t)
out = transfo(input_t)
hsv = tf.image.rgb_to_hsv(out)
assert tf.reduce_all(hsv[:, :, :, 0] <= 0.7)
assert tf.reduce_all(hsv[:, :, :, 0] >= 0.3)
# FP16
if any(tf.config.list_physical_devices("GPU")):
input_t = tf.cast(tf.fill([8, 32, 32, 3], 0.5), dtype=tf.float16)
out = transfo(input_t)
assert out.dtype == tf.float16
def test_gamma():
transfo = T.RandomGamma(min_gamma=1.0, max_gamma=2.0, min_gain=0.8, max_gain=1.0)
input_t = tf.cast(tf.fill([8, 32, 32, 3], 2.0), dtype=tf.float32)
out = transfo(input_t)
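# Presumably out = gain * x ** gamma (as in tf.image.adjust_gamma), so for x = 2 the values stay in [0.8 * 2 ** 1, 1.0 * 2 ** 2] = [1.6, 4.0]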
assert tf.reduce_all(out >= 1.6)
assert tf.reduce_all(out <= 4.0)
# FP16
input_t = tf.cast(tf.fill([8, 32, 32, 3], 2.0), dtype=tf.float16)
out = transfo(input_t)
assert out.dtype == tf.float16
def test_jpegquality():
transfo = T.RandomJpegQuality(min_quality=50)
input_t = tf.cast(tf.fill([32, 32, 3], 1), dtype=tf.float32)
out = transfo(input_t)
assert out.shape == input_t.shape
# FP16
input_t = tf.cast(tf.fill([32, 32, 3], 1), dtype=tf.float16)
out = transfo(input_t)
assert out.dtype == tf.float16
def test_rotate_sample():
img = tf.ones((200, 100, 3), dtype=tf.float32)
boxes = np.array([0, 0, 100, 200])[None, ...]
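# Build the 4 polygon corners from the box: (xmin, ymin), (xmax, ymin), (xmax, ymax), (xmin, ymax)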
polys = np.stack((boxes[..., [0, 1]], boxes[..., [2, 1]], boxes[..., [2, 3]], boxes[..., [0, 3]]), axis=1)
rel_boxes = np.array([0, 0, 1, 1], dtype=np.float32)[None, ...]
rel_polys = np.stack(
(rel_boxes[..., [0, 1]], rel_boxes[..., [2, 1]], rel_boxes[..., [2, 3]], rel_boxes[..., [0, 3]]), axis=1
)
# No angle
rotated_img, rotated_geoms = rotate_sample(img, boxes, 0, False)
assert tf.math.reduce_all(rotated_img == img) and np.all(rotated_geoms == rel_polys)
rotated_img, rotated_geoms = rotate_sample(img, boxes, 0, True)
assert tf.math.reduce_all(rotated_img == img) and np.all(rotated_geoms == rel_polys)
rotated_img, rotated_geoms = rotate_sample(img, polys, 0, False)
assert tf.math.reduce_all(rotated_img == img) and np.all(rotated_geoms == rel_polys)
rotated_img, rotated_geoms = rotate_sample(img, polys, 0, True)
assert tf.math.reduce_all(rotated_img == img) and np.all(rotated_geoms == rel_polys)
# No expansion
expected_img = np.zeros((200, 100, 3), dtype=np.float32)
expected_img[50:150] = 1
expected_img = tf.convert_to_tensor(expected_img)
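# Rotating the 200x100 ones image by 90 degrees in the same canvas leaves a full-width band of ones:
# the 100px width becomes a 100px height centred vertically, so the box corners map to
# relative y in [0.25, 0.75] with x clipped to [0, 1]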
expected_polys = np.array([[0, 0.75], [0, 0.25], [1, 0.25], [1, 0.75]])[None, ...]
rotated_img, rotated_geoms = rotate_sample(img, boxes, 90, False)
assert tf.math.reduce_all(rotated_img == expected_img) and np.all(rotated_geoms == expected_polys)
rotated_img, rotated_geoms = rotate_sample(img, polys, 90, False)
assert tf.math.reduce_all(rotated_img == expected_img) and np.all(rotated_geoms == expected_polys)
rotated_img, rotated_geoms = rotate_sample(img, rel_boxes, 90, False)
assert tf.math.reduce_all(rotated_img == expected_img) and np.all(rotated_geoms == expected_polys)
rotated_img, rotated_geoms = rotate_sample(img, rel_polys, 90, False)
assert tf.math.reduce_all(rotated_img == expected_img) and np.all(rotated_geoms == expected_polys)
# Expansion
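# With expand=True the canvas grows to 100x200, so nothing is cropped: the rotated image is all ones
# and the box spans the whole output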
expected_img = tf.ones((100, 200, 3), dtype=tf.float32)
expected_polys = np.array([[0, 1], [0, 0], [1, 0], [1, 1]], dtype=np.float32)[None, ...]
rotated_img, rotated_geoms = rotate_sample(img, boxes, 90, True)
assert tf.math.reduce_all(rotated_img == expected_img) and np.all(rotated_geoms == expected_polys)
rotated_img, rotated_geoms = rotate_sample(img, polys, 90, True)
assert tf.math.reduce_all(rotated_img == expected_img) and np.all(rotated_geoms == expected_polys)
rotated_img, rotated_geoms = rotate_sample(img, rel_boxes, 90, True)
assert tf.math.reduce_all(rotated_img == expected_img) and np.all(rotated_geoms == expected_polys)
rotated_img, rotated_geoms = rotate_sample(img, rel_polys, 90, True)
assert tf.math.reduce_all(rotated_img == expected_img) and np.all(rotated_geoms == expected_polys)
with pytest.raises(AssertionError):
rotate_sample(img, boxes[None, ...], 90, False)
def test_random_rotate():
rotator = T.RandomRotate(max_angle=10.0, expand=False)
input_t = tf.ones((50, 50, 3), dtype=tf.float32)
boxes = np.array([[15, 20, 35, 30]])
r_img, r_boxes = rotator(input_t, boxes)
assert r_img.shape == input_t.shape
rotator = T.RandomRotate(max_angle=10.0, expand=True)
r_img, r_boxes = rotator(input_t, boxes)
assert r_img.shape != input_t.shape
# FP16
input_t = tf.ones((50, 50, 3), dtype=tf.float16)
r_img, _ = rotator(input_t, boxes)
assert r_img.dtype == tf.float16
def test_crop_detection():
img = tf.ones((50, 50, 3), dtype=tf.float32)
abs_boxes = np.array(
[
[15, 20, 35, 30],
[5, 10, 10, 20],
]
)
crop_box = (12 / 50, 23 / 50, 1.0, 1.0)
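# The second box lies entirely outside the crop and is dropped; the first is shifted to
# crop-local coordinates (origin at (12, 23)) and clamped at 0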
c_img, c_boxes = crop_detection(img, abs_boxes, crop_box)
assert c_img.shape == (26, 37, 3)
assert c_boxes.shape == (1, 4)
assert np.all(c_boxes == np.array([15 - 12, 0, 35 - 12, 30 - 23])[None, ...])
rel_boxes = np.array(
[
[0.3, 0.4, 0.7, 0.6],
[0.1, 0.2, 0.2, 0.4],
]
)
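# Same in relative mode: the crop spans x in [0.24, 1] (width 0.76) and y in [0.46, 1] (height 0.54),
# so e.g. the new xmin is (0.3 - 0.24) / 0.76 = 0.06 / 0.76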
c_img, c_boxes = crop_detection(img, rel_boxes, crop_box)
assert c_img.shape == (26, 37, 3)
assert c_boxes.shape == (1, 4)
assert np.abs(c_boxes - np.array([0.06 / 0.76, 0.0, 0.46 / 0.76, 0.14 / 0.54])[None, ...]).mean() < 1e-7
# FP16
img = tf.ones((50, 50, 3), dtype=tf.float16)
c_img, _ = crop_detection(img, rel_boxes, crop_box)
assert c_img.dtype == tf.float16
with pytest.raises(AssertionError):
crop_detection(img, abs_boxes, (2, 6, 24, 56))
def test_random_crop():
transfo = T.RandomCrop(scale=(0.5, 1.0), ratio=(0.75, 1.33))
input_t = tf.ones((50, 50, 3), dtype=tf.float32)
boxes = np.array([[15, 20, 35, 30]])
img, target = transfo(input_t, dict(boxes=boxes))
# Check the scale (take a margin)
assert img.shape[0] * img.shape[1] >= 0.4 * input_t.shape[0] * input_t.shape[1]
# Check aspect ratio (take a margin)
assert 0.65 <= img.shape[0] / img.shape[1] <= 1.5
# Check the target
assert np.all(target["boxes"] >= 0)
assert np.all(target["boxes"][:, [0, 2]] <= img.shape[1]) and np.all(target["boxes"][:, [1, 3]] <= img.shape[0])
def test_gaussian_blur():
blur = T.GaussianBlur(3, (0.1, 3))
input_t = np.ones((31, 31, 3), dtype=np.float32)
input_t[15, 15] = 0
blur_img = blur(tf.convert_to_tensor(input_t)).numpy()
assert blur_img.shape == input_t.shape
assert np.all(blur_img[15, 15] > 0)
@pytest.mark.parametrize(
"input_dtype, input_size",
[
[tf.float32, (32, 32, 3)],
[tf.uint8, (32, 32, 3)],
],
)
def test_channel_shuffle(input_dtype, input_size):
transfo = T.ChannelShuffle()
input_t = tf.random.uniform(input_size, dtype=tf.float32)
if input_dtype == tf.uint8:
input_t = tf.math.round(255 * input_t)
input_t = tf.cast(input_t, dtype=input_dtype)
out = transfo(input_t)
assert isinstance(out, tf.Tensor)
assert out.shape == input_size
assert out.dtype == input_dtype
# Ensure that nothing has changed apart from channel order
assert tf.math.reduce_all(tf.math.reduce_sum(input_t, -1) == tf.math.reduce_sum(out, -1))
@pytest.mark.parametrize(
"input_dtype,input_shape",
[
[tf.float32, (32, 32, 3)],
[tf.uint8, (32, 32, 3)],
],
)
def test_gaussian_noise(input_dtype, input_shape):
transform = T.GaussianNoise(0.0, 1.0)
input_t = tf.random.uniform(input_shape, dtype=tf.float32)
if input_dtype == tf.uint8:
input_t = tf.math.round((255 * input_t))
input_t = tf.cast(input_t, dtype=input_dtype)
transformed = transform(input_t)
assert isinstance(transformed, tf.Tensor)
assert transformed.shape == input_shape
assert transformed.dtype == input_dtype
assert tf.math.reduce_any(transformed != input_t)
assert tf.math.reduce_all(transformed >= 0)
if input_dtype == tf.uint8:
assert tf.reduce_all(transformed <= 255)
else:
assert tf.reduce_all(transformed <= 1.0)
@pytest.mark.parametrize("p", [1, 0])
def test_randomhorizontalflip(p):
# Test both cases: flip probability 1 and flip probability 0
transform = T.RandomHorizontalFlip(p)
input_t = np.ones((32, 32, 3))
input_t[:, :16, :] = 0
input_t = tf.convert_to_tensor(input_t)
target = {"boxes": np.array([[0.1, 0.1, 0.3, 0.4]], dtype=np.float32), "labels": np.ones(1, dtype=np.int64)}
transformed, _target = transform(input_t, target)
assert isinstance(transformed, tf.Tensor)
assert transformed.shape == input_t.shape
assert transformed.dtype == input_t.dtype
# integrity check of targets
assert isinstance(_target, dict)
assert all(isinstance(val, np.ndarray) for val in _target.values())
assert _target["boxes"].dtype == np.float32
assert _target["labels"].dtype == np.int64
if p == 1:
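# A horizontal flip mirrors the x coordinates: xmin' = 1 - xmax = 0.7, xmax' = 1 - xmin = 0.9; y is unchanged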
assert np.all(_target["boxes"] == np.array([[0.7, 0.1, 0.9, 0.4]], dtype=np.float32))
assert tf.reduce_all(
tf.math.reduce_mean(transformed, (0, 2)) == tf.constant([1] * 16 + [0] * 16, dtype=tf.float64)
)
elif p == 0:
assert np.all(_target["boxes"] == np.array([[0.1, 0.1, 0.3, 0.4]], dtype=np.float32))
assert tf.reduce_all(
tf.math.reduce_mean(transformed, (0, 2)) == tf.constant([0] * 16 + [1] * 16, dtype=tf.float64)
)
assert np.all(_target["labels"] == np.ones(1, dtype=np.int64))
@pytest.mark.parametrize(
"input_dtype,input_shape",
[
[tf.float32, (32, 32, 3)],
[tf.uint8, (32, 32, 3)],
[tf.float32, (64, 32, 3)],
[tf.uint8, (64, 32, 3)],
],
)
def test_random_shadow(input_dtype, input_shape):
transform = T.RandomShadow((0.2, 0.8))
input_t = tf.random.uniform(input_shape, dtype=tf.float32)
if input_dtype == tf.uint8:
input_t = tf.math.round((255 * input_t))
input_t = tf.cast(input_t, dtype=input_dtype)
transformed = transform(input_t)
assert isinstance(transformed, tf.Tensor)
assert transformed.shape == input_shape
assert transformed.dtype == input_dtype
# The shadow will darken the picture
assert tf.math.reduce_mean(input_t) >= tf.math.reduce_mean(transformed)
assert tf.math.reduce_all(transformed >= 0)
if input_dtype == tf.uint8:
assert tf.reduce_all(transformed <= 255)
else:
assert tf.reduce_all(transformed <= 1.0)
|
import os
import shutil
import tempfile
import numpy as np
import onnxruntime
import pytest
import tensorflow as tf
from doctr.io import DocumentFile
from doctr.models import recognition
from doctr.models.preprocessor import PreProcessor
from doctr.models.recognition.crnn.tensorflow import CTCPostProcessor
from doctr.models.recognition.master.tensorflow import MASTERPostProcessor
from doctr.models.recognition.predictor import RecognitionPredictor
from doctr.models.recognition.sar.tensorflow import SARPostProcessor
from doctr.models.recognition.vitstr.tensorflow import ViTSTRPostProcessor
from doctr.models.utils import export_model_to_onnx
from doctr.utils.geometry import extract_crops
@pytest.mark.parametrize(
"arch_name, input_shape",
[
["crnn_vgg16_bn", (32, 128, 3)],
["crnn_mobilenet_v3_small", (32, 128, 3)],
["crnn_mobilenet_v3_large", (32, 128, 3)],
["sar_resnet31", (32, 128, 3)],
["master", (32, 128, 3)],
["vitstr_small", (32, 128, 3)],
["vitstr_base", (32, 128, 3)],
],
)
def test_recognition_models(arch_name, input_shape):
batch_size = 4
reco_model = recognition.__dict__[arch_name](pretrained=True, input_shape=input_shape)
assert isinstance(reco_model, tf.keras.Model)
input_tensor = tf.random.uniform(shape=[batch_size, *input_shape], minval=0, maxval=1)
target = ["i", "am", "a", "jedi"]
out = reco_model(input_tensor, target, return_model_output=True, return_preds=True)
assert isinstance(out, dict)
assert len(out) == 3
assert isinstance(out["out_map"], tf.Tensor)
assert out["out_map"].dtype == tf.float32
assert isinstance(out["preds"], list)
assert len(out["preds"]) == batch_size
assert all(isinstance(word, str) and isinstance(conf, float) and 0 <= conf <= 1 for word, conf in out["preds"])
assert isinstance(out["loss"], tf.Tensor)
# The model requires targets when called in training mode
with pytest.raises(ValueError):
reco_model(input_tensor, None, training=True)
@pytest.mark.parametrize(
"post_processor, input_shape",
[
[SARPostProcessor, [2, 30, 119]],
[CTCPostProcessor, [2, 30, 119]],
[MASTERPostProcessor, [2, 30, 119]],
[ViTSTRPostProcessor, [2, 30, 119]],
],
)
def test_reco_postprocessors(post_processor, input_shape, mock_vocab):
processor = post_processor(mock_vocab)
decoded = processor(tf.random.uniform(shape=input_shape, minval=0, maxval=1, dtype=tf.float32))
assert isinstance(decoded, list)
assert all(isinstance(word, str) and isinstance(conf, float) and 0 <= conf <= 1 for word, conf in decoded)
assert len(decoded) == input_shape[0]
assert all(char in mock_vocab for word, _ in decoded for char in word)
# Repr
assert repr(processor) == f"{post_processor.__name__}(vocab_size={len(mock_vocab)})"
@pytest.fixture(scope="session")
def test_recognitionpredictor(mock_pdf, mock_vocab): # noqa: F811
batch_size = 4
predictor = RecognitionPredictor(
PreProcessor(output_size=(32, 128), batch_size=batch_size, preserve_aspect_ratio=True),
recognition.crnn_vgg16_bn(vocab=mock_vocab, input_shape=(32, 128, 3)),
)
pages = DocumentFile.from_pdf(mock_pdf).as_images()
# Create bounding boxes
boxes = np.array([[0.5, 0.5, 0.75, 0.75], [0.5, 0.5, 1.0, 1.0]], dtype=np.float32)
crops = extract_crops(pages[0], boxes)
out = predictor(crops)
# One prediction per crop
assert len(out) == boxes.shape[0]
assert all(isinstance(val, str) and isinstance(conf, float) for val, conf in out)
# Dimension check
with pytest.raises(ValueError):
input_crop = (255 * np.random.rand(1, 128, 64, 3)).astype(np.uint8)
_ = predictor([input_crop])
return predictor
@pytest.mark.parametrize(
"arch_name",
[
"crnn_vgg16_bn",
"crnn_mobilenet_v3_small",
"crnn_mobilenet_v3_large",
"sar_resnet31",
"master",
"vitstr_small",
"vitstr_base",
],
)
def test_recognition_zoo(arch_name):
batch_size = 2
# Model
predictor = recognition.zoo.recognition_predictor(arch_name, pretrained=False)
# object check
assert isinstance(predictor, RecognitionPredictor)
input_tensor = tf.random.uniform(shape=[batch_size, 128, 128, 3], minval=0, maxval=1)
out = predictor(input_tensor)
assert isinstance(out, list) and len(out) == batch_size
assert all(isinstance(word, str) and isinstance(conf, float) for word, conf in out)
@pytest.mark.parametrize(
"arch_name",
[
"crnn_vgg16_bn",
"crnn_mobilenet_v3_small",
"crnn_mobilenet_v3_large",
],
)
def test_crnn_beam_search(arch_name):
batch_size = 2
# Model
predictor = recognition.zoo.recognition_predictor(arch_name, pretrained=False)
# object check
assert isinstance(predictor, RecognitionPredictor)
input_tensor = tf.random.uniform(shape=[batch_size, 128, 128, 3], minval=0, maxval=1)
out = predictor(input_tensor, beam_width=10, top_paths=10)
assert isinstance(out, list) and len(out) == batch_size
assert all(
isinstance(words, list)
and isinstance(confs, list)
and all(isinstance(word, str) for word in words)
and all(isinstance(conf, float) for conf in confs)
for words, confs in out
)
def test_recognition_zoo_error():
with pytest.raises(ValueError):
_ = recognition.zoo.recognition_predictor("my_fancy_model", pretrained=False)
@pytest.mark.skipif(os.getenv("SLOW", "0") == "0", reason="slow test")
@pytest.mark.parametrize(
"arch_name, input_shape",
[
["crnn_vgg16_bn", (32, 128, 3)],
["crnn_mobilenet_v3_small", (32, 128, 3)],
["crnn_mobilenet_v3_large", (32, 128, 3)],
["sar_resnet31", (32, 128, 3)],
["master", (32, 128, 3)],
["vitstr_small", (32, 128, 3)], # testing one vitstr version is enough
],
)
def test_models_onnx_export(arch_name, input_shape):
# Model
batch_size = 2
tf.keras.backend.clear_session()
model = recognition.__dict__[arch_name](pretrained=True, exportable=True, input_shape=input_shape)
# SAR, MASTER, ViTSTR export currently only available with constant batch size
if arch_name in ["sar_resnet31", "master", "vitstr_small"]:
dummy_input = [tf.TensorSpec([batch_size, *input_shape], tf.float32, name="input")]
else:
# batch_size = None for dynamic batch size
dummy_input = [tf.TensorSpec([None, *input_shape], tf.float32, name="input")]
np_dummy_input = np.random.rand(batch_size, *input_shape).astype(np.float32)
with tempfile.TemporaryDirectory() as tmpdir:
# Export
model_path, output = export_model_to_onnx(
model,
model_name=os.path.join(tmpdir, "model"),
dummy_input=dummy_input,
large_model=arch_name == "master",
)
assert os.path.exists(model_path)
if arch_name == "master":
# large models are exported as zip archive
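# (presumably via the large_model flag of tf2onnx: weights are stored as external data,
# and the archive holds __MODEL_PROTO.onnx plus the tensor files)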
shutil.unpack_archive(model_path, tmpdir, "zip")
model_path = os.path.join(tmpdir, "__MODEL_PROTO.onnx")
else:
model_path = os.path.join(tmpdir, "model.onnx")
# Inference
ort_session = onnxruntime.InferenceSession(model_path, providers=["CPUExecutionProvider"])
ort_outs = ort_session.run(output, {"input": np_dummy_input})
assert isinstance(ort_outs, list) and len(ort_outs) == 1
assert ort_outs[0].shape[0] == batch_size
|
import numpy as np
import pytest
from doctr import models
from doctr.file_utils import CLASS_NAME
from doctr.io import Document, DocumentFile
from doctr.io.elements import KIEDocument
from doctr.models import detection, recognition
from doctr.models.detection.predictor import DetectionPredictor
from doctr.models.detection.zoo import detection_predictor
from doctr.models.kie_predictor import KIEPredictor
from doctr.models.predictor import OCRPredictor
from doctr.models.preprocessor import PreProcessor
from doctr.models.recognition.predictor import RecognitionPredictor
from doctr.models.recognition.zoo import recognition_predictor
from doctr.utils.repr import NestedObject
@pytest.mark.parametrize(
"assume_straight_pages, straighten_pages",
[
[True, False],
[False, False],
[True, True],
],
)
def test_ocrpredictor(mock_pdf, mock_vocab, assume_straight_pages, straighten_pages):
det_bsize = 4
det_predictor = DetectionPredictor(
PreProcessor(output_size=(512, 512), batch_size=det_bsize),
detection.db_mobilenet_v3_large(
pretrained=True,
pretrained_backbone=False,
input_shape=(512, 512, 3),
assume_straight_pages=assume_straight_pages,
),
)
reco_bsize = 16
reco_predictor = RecognitionPredictor(
PreProcessor(output_size=(32, 128), batch_size=reco_bsize, preserve_aspect_ratio=True),
recognition.crnn_vgg16_bn(pretrained=False, pretrained_backbone=False, vocab=mock_vocab),
)
doc = DocumentFile.from_pdf(mock_pdf)
predictor = OCRPredictor(
det_predictor,
reco_predictor,
assume_straight_pages=assume_straight_pages,
straighten_pages=straighten_pages,
detect_orientation=True,
detect_language=True,
)
if assume_straight_pages:
assert predictor.crop_orientation_predictor is None
else:
assert isinstance(predictor.crop_orientation_predictor, NestedObject)
out = predictor(doc)
assert isinstance(out, Document)
assert len(out.pages) == 2
# Dimension check
with pytest.raises(ValueError):
input_page = (255 * np.random.rand(1, 256, 512, 3)).astype(np.uint8)
_ = predictor([input_page])
orientation = 0
assert out.pages[0].orientation["value"] == orientation
language = "unknown"
assert out.pages[0].language["value"] == language
def test_trained_ocr_predictor(mock_tilted_payslip):
doc = DocumentFile.from_images(mock_tilted_payslip)
det_predictor = detection_predictor("db_resnet50", pretrained=True, batch_size=2, assume_straight_pages=True)
reco_predictor = recognition_predictor("crnn_vgg16_bn", pretrained=True, batch_size=128)
predictor = OCRPredictor(
det_predictor,
reco_predictor,
assume_straight_pages=True,
straighten_pages=True,
)
out = predictor(doc)
assert out.pages[0].blocks[0].lines[0].words[0].value == "Mr."
geometry_mr = np.array(
[[0.08844472, 0.35763523], [0.11625107, 0.34320644], [0.12588427, 0.35771032], [0.09807791, 0.37213911]]
)
assert np.allclose(np.array(out.pages[0].blocks[0].lines[0].words[0].geometry), geometry_mr)
assert out.pages[0].blocks[1].lines[0].words[-1].value == "revised"
geometry_revised = np.array(
[[0.50422498, 0.19551784], [0.55741975, 0.16791493], [0.56705294, 0.18241881], [0.51385817, 0.21002172]]
)
assert np.allclose(np.array(out.pages[0].blocks[1].lines[0].words[-1].geometry), geometry_revised)
det_predictor = detection_predictor(
"db_resnet50",
pretrained=True,
batch_size=2,
assume_straight_pages=True,
preserve_aspect_ratio=True,
symmetric_pad=True,
)
predictor = OCRPredictor(
det_predictor,
reco_predictor,
assume_straight_pages=True,
straighten_pages=True,
preserve_aspect_ratio=True,
symmetric_pad=True,
)
out = predictor(doc)
assert out.pages[0].blocks[0].lines[0].words[0].value == "Mr."
@pytest.mark.parametrize(
"assume_straight_pages, straighten_pages",
[
[True, False],
[False, False],
[True, True],
],
)
def test_kiepredictor(mock_pdf, mock_vocab, assume_straight_pages, straighten_pages):
det_bsize = 4
det_predictor = DetectionPredictor(
PreProcessor(output_size=(512, 512), batch_size=det_bsize),
detection.db_mobilenet_v3_large(
pretrained=True,
pretrained_backbone=False,
input_shape=(512, 512, 3),
assume_straight_pages=assume_straight_pages,
),
)
reco_bsize = 16
reco_predictor = RecognitionPredictor(
PreProcessor(output_size=(32, 128), batch_size=reco_bsize, preserve_aspect_ratio=True),
recognition.crnn_vgg16_bn(pretrained=False, pretrained_backbone=False, vocab=mock_vocab),
)
doc = DocumentFile.from_pdf(mock_pdf)
predictor = KIEPredictor(
det_predictor,
reco_predictor,
assume_straight_pages=assume_straight_pages,
straighten_pages=straighten_pages,
detect_orientation=True,
detect_language=True,
)
if assume_straight_pages:
assert predictor.crop_orientation_predictor is None
else:
assert isinstance(predictor.crop_orientation_predictor, NestedObject)
out = predictor(doc)
assert isinstance(out, KIEDocument)
assert len(out.pages) == 2
# Dimension check
with pytest.raises(ValueError):
input_page = (255 * np.random.rand(1, 256, 512, 3)).astype(np.uint8)
_ = predictor([input_page])
orientation = 0
assert out.pages[0].orientation["value"] == orientation
language = "unknown"
assert out.pages[0].language["value"] == language
def test_trained_kie_predictor(mock_tilted_payslip):
doc = DocumentFile.from_images(mock_tilted_payslip)
det_predictor = detection_predictor("db_resnet50", pretrained=True, batch_size=2, assume_straight_pages=True)
reco_predictor = recognition_predictor("crnn_vgg16_bn", pretrained=True, batch_size=128)
predictor = KIEPredictor(
det_predictor,
reco_predictor,
assume_straight_pages=True,
straighten_pages=True,
)
out = predictor(doc)
assert isinstance(out, KIEDocument)
assert out.pages[0].predictions[CLASS_NAME][0].value == "Mr."
geometry_mr = np.array(
[[0.08844472, 0.35763523], [0.11625107, 0.34320644], [0.12588427, 0.35771032], [0.09807791, 0.37213911]]
)
assert np.allclose(np.array(out.pages[0].predictions[CLASS_NAME][0].geometry), geometry_mr)
assert out.pages[0].predictions[CLASS_NAME][-1].value == "Kabir)"
geometry_revised = np.array(
[[0.43725992, 0.67232439], [0.49045468, 0.64472149], [0.50570724, 0.66768597], [0.452512473, 0.69528887]]
)
assert np.allclose(np.array(out.pages[0].predictions[CLASS_NAME][-1].geometry), geometry_revised)
det_predictor = detection_predictor(
"db_resnet50",
pretrained=True,
batch_size=2,
assume_straight_pages=True,
preserve_aspect_ratio=True,
symmetric_pad=True,
)
predictor = KIEPredictor(
det_predictor,
reco_predictor,
assume_straight_pages=True,
straighten_pages=True,
preserve_aspect_ratio=True,
symmetric_pad=True,
)
out = predictor(doc)
assert isinstance(out, KIEDocument)
assert out.pages[0].predictions[CLASS_NAME][0].value == "Mr."
def _test_predictor(predictor):
# Output checks
assert isinstance(predictor, OCRPredictor)
doc = [np.zeros((512, 512, 3), dtype=np.uint8)]
out = predictor(doc)
# Document
assert isinstance(out, Document)
# The input doc has 1 page
assert len(out.pages) == 1
# Dimension check
with pytest.raises(ValueError):
input_page = (255 * np.random.rand(1, 256, 512, 3)).astype(np.uint8)
_ = predictor([input_page])
def _test_kiepredictor(predictor):
# Output checks
assert isinstance(predictor, KIEPredictor)
doc = [np.zeros((512, 512, 3), dtype=np.uint8)]
out = predictor(doc)
# Document
assert isinstance(out, KIEDocument)
# The input doc has 1 page
assert len(out.pages) == 1
# Dimension check
with pytest.raises(ValueError):
input_page = (255 * np.random.rand(1, 256, 512, 3)).astype(np.uint8)
_ = predictor([input_page])
@pytest.mark.parametrize(
"det_arch, reco_arch",
[
["db_mobilenet_v3_large", "crnn_vgg16_bn"],
],
)
def test_zoo_models(det_arch, reco_arch):
# Model
predictor = models.ocr_predictor(det_arch, reco_arch, pretrained=True)
_test_predictor(predictor)
# passing model instance directly
det_model = detection.__dict__[det_arch](pretrained=True)
reco_model = recognition.__dict__[reco_arch](pretrained=True)
predictor = models.ocr_predictor(det_model, reco_model)
_test_predictor(predictor)
# passing recognition model as detection model
with pytest.raises(ValueError):
models.ocr_predictor(det_arch=reco_model, pretrained=True)
# passing detection model as recognition model
with pytest.raises(ValueError):
models.ocr_predictor(reco_arch=det_model, pretrained=True)
# KIE predictor
predictor = models.kie_predictor(det_arch, reco_arch, pretrained=True)
_test_kiepredictor(predictor)
# passing model instance directly
det_model = detection.__dict__[det_arch](pretrained=True)
reco_model = recognition.__dict__[reco_arch](pretrained=True)
predictor = models.kie_predictor(det_model, reco_model)
_test_kiepredictor(predictor)
# passing recognition model as detection model
with pytest.raises(ValueError):
models.kie_predictor(det_arch=reco_model, pretrained=True)
# passing detection model as recognition model
with pytest.raises(ValueError):
models.kie_predictor(reco_arch=det_model, pretrained=True)
|
import os
import pytest
import tensorflow as tf
from tensorflow.keras import Sequential, layers
from tensorflow.keras.applications import ResNet50
from doctr.models.utils import IntermediateLayerGetter, conv_sequence, load_pretrained_params
def test_load_pretrained_params(tmpdir_factory):
model = Sequential([layers.Dense(8, activation="relu", input_shape=(4,)), layers.Dense(4)])
# URL of a small checkpoint archive used for this test
url = "https://doctr-static.mindee.com/models?id=v0.1-models/tmp_checkpoint-4a98e492.zip&src=0"
# Temp cache dir
cache_dir = tmpdir_factory.mktemp("cache")
# Pass an incorrect hash
with pytest.raises(ValueError):
load_pretrained_params(model, url, "mywronghash", cache_dir=str(cache_dir), internal_name="")
# Let it resolve the hash from the file name
load_pretrained_params(model, url, cache_dir=str(cache_dir), internal_name="")
# Check that the file was downloaded & the archive extracted
assert os.path.exists(cache_dir.join("models").join("tmp_checkpoint-4a98e492"))
# Check that the archive itself is still present
assert os.path.exists(cache_dir.join("models").join("tmp_checkpoint-4a98e492.zip"))
def test_conv_sequence():
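# Layer counts: a lone conv is 1; a string activation is apparently fused into the conv (still 1);
# bn=True appends a BatchNorm (2); with both, the activation comes after the norm as its own layer (3)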
assert len(conv_sequence(8, kernel_size=3)) == 1
assert len(conv_sequence(8, "relu", kernel_size=3)) == 1
assert len(conv_sequence(8, None, True, kernel_size=3)) == 2
assert len(conv_sequence(8, "relu", True, kernel_size=3)) == 3
def test_intermediate_layer_getter():
backbone = ResNet50(include_top=False, weights=None, pooling=None)
feat_extractor = IntermediateLayerGetter(backbone, ["conv2_block3_out", "conv3_block4_out"])
# Check num of output features
input_tensor = tf.random.uniform(shape=[1, 224, 224, 3], minval=0, maxval=1)
assert len(feat_extractor(input_tensor)) == 2
# Repr
assert repr(feat_extractor) == "IntermediateLayerGetter()"
|
import numpy as np
import pytest
import tensorflow as tf
from doctr.models.preprocessor import PreProcessor
@pytest.mark.parametrize(
"batch_size, output_size, input_tensor, expected_batches, expected_value",
[
[2, (128, 128), np.full((3, 256, 128, 3), 255, dtype=np.uint8), 1, 0.5], # numpy uint8
[2, (128, 128), np.ones((3, 256, 128, 3), dtype=np.float32), 1, 0.5], # numpy fp32
[2, (128, 128), tf.cast(tf.fill((3, 256, 128, 3), 255), dtype=tf.uint8), 1, 0.5], # tf uint8
[2, (128, 128), tf.ones((3, 128, 128, 3), dtype=tf.float32), 1, 0.5], # tf fp32
[2, (128, 128), [np.full((256, 128, 3), 255, dtype=np.uint8)] * 3, 2, 0.5], # list of numpy uint8
[2, (128, 128), [np.ones((256, 128, 3), dtype=np.float32)] * 3, 2, 0.5], # list of numpy fp32
[2, (128, 128), [tf.cast(tf.fill((256, 128, 3), 255), dtype=tf.uint8)] * 3, 2, 0.5], # list of tf uint8
[2, (128, 128), [tf.ones((128, 128, 3), dtype=tf.float32)] * 3, 2, 0.5], # list of tf fp32
],
)
def test_preprocessor(batch_size, output_size, input_tensor, expected_batches, expected_value):
processor = PreProcessor(output_size, batch_size)
# Invalid input type
with pytest.raises(TypeError):
processor(42)
# 4D check
with pytest.raises(AssertionError):
processor(np.full((256, 128, 3), 255, dtype=np.uint8))
with pytest.raises(TypeError):
processor(np.full((1, 256, 128, 3), 255, dtype=np.int32))
# 3D check
with pytest.raises(AssertionError):
processor([np.full((3, 256, 128, 3), 255, dtype=np.uint8)])
with pytest.raises(TypeError):
processor([np.full((256, 128, 3), 255, dtype=np.int32)])
out = processor(input_tensor)
assert isinstance(out, list) and len(out) == expected_batches
assert all(isinstance(b, tf.Tensor) for b in out)
assert all(b.dtype == tf.float32 for b in out)
assert all(b.shape[1:3] == output_size for b in out)
assert all(tf.math.reduce_all(b == expected_value) for b in out)
assert len(repr(processor).split("\n")) == 4
|
from typing import List, Tuple
import tensorflow as tf
from doctr.datasets import DataLoader
class MockDataset:
def __init__(self, input_size):
self.data: List[Tuple[float, bool]] = [
(1, True),
(0, False),
(0.5, True),
]
self.input_size = input_size
def __len__(self):
return len(self.data)
def __getitem__(self, index):
val, label = self.data[index]
return tf.cast(tf.fill(self.input_size, val), dtype=tf.float32), tf.constant(label, dtype=tf.bool)
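# Variant of MockDataset with a custom collate_fn that stacks the images but keeps labels as a plain list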
class MockDatasetBis(MockDataset):
@staticmethod
def collate_fn(samples):
x, y = zip(*samples)
return tf.stack(x, axis=0), list(y)
def test_dataloader():
loader = DataLoader(
MockDataset((32, 32)),
shuffle=True,
batch_size=2,
drop_last=True,
)
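# 3 samples with batch_size=2 and drop_last=True -> a single full batch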
ds_iter = iter(loader)
num_batches = 0
for x, y in ds_iter:
num_batches += 1
assert len(loader) == 1
assert num_batches == 1
assert isinstance(x, tf.Tensor) and isinstance(y, tf.Tensor)
assert x.shape == (2, 32, 32)
assert y.shape == (2,)
# Keep the last incomplete batch (drop_last=False)
loader = DataLoader(
MockDataset((32, 32)),
shuffle=True,
batch_size=2,
drop_last=False,
)
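# drop_last=False keeps the trailing sample: ceil(3 / 2) = 2 batches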
ds_iter = iter(loader)
num_batches = 0
for x, y in ds_iter:
num_batches += 1
assert loader.num_batches == 2
assert num_batches == 2
# Custom collate
loader = DataLoader(
MockDatasetBis((32, 32)),
shuffle=True,
batch_size=2,
drop_last=False,
)
ds_iter = iter(loader)
x, y = next(ds_iter)
assert isinstance(x, tf.Tensor) and isinstance(y, list)
assert x.shape == (2, 32, 32)
assert len(y) == 2
|
import os
import tempfile
import cv2
import numpy as np
import onnxruntime
import pytest
import tensorflow as tf
from doctr.models import classification
from doctr.models.classification.predictor import CropOrientationPredictor
from doctr.models.utils import export_model_to_onnx
@pytest.mark.parametrize(
"arch_name, input_shape, output_size",
[
["vgg16_bn_r", (32, 32, 3), (126,)],
["resnet18", (32, 32, 3), (126,)],
["resnet31", (32, 32, 3), (126,)],
["resnet34", (32, 32, 3), (126,)],
["resnet34_wide", (32, 32, 3), (126,)],
["resnet50", (32, 32, 3), (126,)],
["magc_resnet31", (32, 32, 3), (126,)],
["mobilenet_v3_small", (32, 32, 3), (126,)],
["mobilenet_v3_large", (32, 32, 3), (126,)],
["vit_s", (32, 32, 3), (126,)],
["vit_b", (32, 32, 3), (126,)],
],
)
def test_classification_architectures(arch_name, input_shape, output_size):
# Model
batch_size = 2
tf.keras.backend.clear_session()
model = classification.__dict__[arch_name](pretrained=True, include_top=True, input_shape=input_shape)
# Forward
out = model(tf.random.uniform(shape=[batch_size, *input_shape], maxval=1, dtype=tf.float32))
# Output checks
assert isinstance(out, tf.Tensor)
assert out.dtype == tf.float32
assert out.numpy().shape == (batch_size, *output_size)
@pytest.mark.parametrize(
"arch_name, input_shape",
[
["mobilenet_v3_small_orientation", (128, 128, 3)],
],
)
def test_classification_models(arch_name, input_shape):
batch_size = 8
classif_model = classification.__dict__[arch_name](pretrained=True, input_shape=input_shape)
assert isinstance(classif_model, tf.keras.Model)
input_tensor = tf.random.uniform(shape=[batch_size, *input_shape], minval=0, maxval=1)
out = classif_model(input_tensor)
assert isinstance(out, tf.Tensor)
assert out.shape.as_list() == [8, 4]
@pytest.mark.parametrize(
"arch_name",
[
"mobilenet_v3_small_orientation",
],
)
def test_classification_zoo(arch_name):
batch_size = 16
# Model
predictor = classification.zoo.crop_orientation_predictor(arch_name, pretrained=False)
with pytest.raises(ValueError):
_ = classification.zoo.crop_orientation_predictor(arch="wrong_model", pretrained=False)
# object check
assert isinstance(predictor, CropOrientationPredictor)
input_tensor = tf.random.uniform(shape=[batch_size, 128, 128, 3], minval=0, maxval=1)
out = predictor(input_tensor)
assert isinstance(out, list) and len(out) == batch_size
assert all(isinstance(pred, int) for pred in out)
def test_crop_orientation_model(mock_text_box):
text_box_0 = cv2.imread(mock_text_box)
text_box_90 = np.rot90(text_box_0, 1)
text_box_180 = np.rot90(text_box_0, 2)
text_box_270 = np.rot90(text_box_0, 3)
classifier = classification.crop_orientation_predictor("mobilenet_v3_small_orientation", pretrained=True)
assert classifier([text_box_0, text_box_90, text_box_180, text_box_270]) == [0, 1, 2, 3]
@pytest.mark.parametrize(
"arch_name, input_shape, output_size",
[
["vgg16_bn_r", (32, 32, 3), (126,)],
["resnet18", (32, 32, 3), (126,)],
["resnet31", (32, 32, 3), (126,)],
["resnet34", (32, 32, 3), (126,)],
["resnet34_wide", (32, 32, 3), (126,)],
["resnet50", (32, 32, 3), (126,)],
["magc_resnet31", (32, 32, 3), (126,)],
["vit_b", (32, 32, 3), (126,)],
# Disabled for now
# ["mobilenet_v3_small", (512, 512, 3), (126,)],
# ["mobilenet_v3_large", (512, 512, 3), (126,)],
# ["mobilenet_v3_small_orientation", (128, 128, 3), (4,)],
],
)
def test_models_onnx_export(arch_name, input_shape, output_size):
# Model
batch_size = 2
tf.keras.backend.clear_session()
if arch_name == "mobilenet_v3_small_orientation":
model = classification.__dict__[arch_name](pretrained=True, input_shape=input_shape)
else:
model = classification.__dict__[arch_name](pretrained=True, include_top=True, input_shape=input_shape)
if arch_name == "vit_b":
# vit model needs a fixed batch size
dummy_input = [tf.TensorSpec([2, *input_shape], tf.float32, name="input")]
else:
# batch_size = None for dynamic batch size
dummy_input = [tf.TensorSpec([None, *input_shape], tf.float32, name="input")]
np_dummy_input = np.random.rand(batch_size, *input_shape).astype(np.float32)
with tempfile.TemporaryDirectory() as tmpdir:
# Export
model_path, output = export_model_to_onnx(
model, model_name=os.path.join(tmpdir, "model"), dummy_input=dummy_input
)
assert os.path.exists(model_path)
# Inference
ort_session = onnxruntime.InferenceSession(
os.path.join(tmpdir, "model.onnx"), providers=["CPUExecutionProvider"]
)
ort_outs = ort_session.run(output, {"input": np_dummy_input})
assert isinstance(ort_outs, list) and len(ort_outs) == 1
assert ort_outs[0].shape == (batch_size, *output_size)
|
import os
from shutil import move
import numpy as np
import pytest
import tensorflow as tf
from doctr import datasets
from doctr.datasets import DataLoader
from doctr.file_utils import CLASS_NAME
from doctr.transforms import Resize
def _validate_dataset(ds, input_size, batch_size=2, class_indices=False, is_polygons=False):
# Fetch one sample
img, target = ds[0]
assert isinstance(img, tf.Tensor)
assert img.shape == (*input_size, 3)
assert img.dtype == tf.float32
assert isinstance(target, dict)
assert isinstance(target["boxes"], np.ndarray) and target["boxes"].dtype == np.float32
if is_polygons:
assert target["boxes"].ndim == 3 and target["boxes"].shape[1:] == (4, 2)
else:
assert target["boxes"].ndim == 2 and target["boxes"].shape[1:] == (4,)
assert np.all(np.logical_and(target["boxes"] <= 1, target["boxes"] >= 0))
if class_indices:
assert isinstance(target["labels"], np.ndarray) and target["labels"].dtype == np.int64
else:
assert isinstance(target["labels"], list) and all(isinstance(s, str) for s in target["labels"])
assert len(target["labels"]) == len(target["boxes"])
# Check batching
loader = DataLoader(ds, batch_size=batch_size)
images, targets = next(iter(loader))
assert isinstance(images, tf.Tensor) and images.shape == (batch_size, *input_size, 3)
assert isinstance(targets, list) and all(isinstance(elt, dict) for elt in targets)
def _validate_dataset_recognition_part(ds, input_size, batch_size=2):
# Fetch one sample
img, label = ds[0]
assert isinstance(img, tf.Tensor)
assert img.shape == (*input_size, 3)
assert img.dtype == tf.float32
assert isinstance(label, str)
# Check batching
loader = DataLoader(ds, batch_size=batch_size)
images, labels = next(iter(loader))
assert isinstance(images, tf.Tensor) and images.shape == (batch_size, *input_size, 3)
assert isinstance(labels, list) and all(isinstance(elt, str) for elt in labels)
def test_detection_dataset(mock_image_folder, mock_detection_label):
input_size = (1024, 1024)
ds = datasets.DetectionDataset(
img_folder=mock_image_folder,
label_path=mock_detection_label,
img_transforms=Resize(input_size),
)
assert len(ds) == 5
img, target_dict = ds[0]
target = target_dict[CLASS_NAME]
assert isinstance(img, tf.Tensor)
assert img.shape[:2] == input_size
assert img.dtype == tf.float32
# Bounding boxes
assert isinstance(target_dict, dict)
assert isinstance(target, np.ndarray) and target.dtype == np.float32
assert np.all(np.logical_and(target[:, :4] >= 0, target[:, :4] <= 1))
assert target.shape[1] == 4
loader = DataLoader(ds, batch_size=2)
images, targets = next(iter(loader))
assert isinstance(images, tf.Tensor) and images.shape == (2, *input_size, 3)
assert isinstance(targets, list) and all(
isinstance(elt, np.ndarray) for target in targets for elt in target.values()
)
# Rotated DS
rotated_ds = datasets.DetectionDataset(
img_folder=mock_image_folder,
label_path=mock_detection_label,
img_transforms=Resize(input_size),
use_polygons=True,
)
_, r_target = rotated_ds[0]
assert r_target[CLASS_NAME].shape[1:] == (4, 2)
# File existence check
img_name, _ = ds.data[0]
move(os.path.join(ds.root, img_name), os.path.join(ds.root, "tmp_file"))
with pytest.raises(FileNotFoundError):
datasets.DetectionDataset(mock_image_folder, mock_detection_label)
move(os.path.join(ds.root, "tmp_file"), os.path.join(ds.root, img_name))
def test_recognition_dataset(mock_image_folder, mock_recognition_label):
input_size = (32, 128)
ds = datasets.RecognitionDataset(
img_folder=mock_image_folder,
labels_path=mock_recognition_label,
img_transforms=Resize(input_size, preserve_aspect_ratio=True),
)
assert len(ds) == 5
image, label = ds[0]
assert isinstance(image, tf.Tensor)
assert image.shape[:2] == input_size
assert image.dtype == tf.float32
assert isinstance(label, str)
loader = DataLoader(ds, batch_size=2)
images, labels = next(iter(loader))
assert isinstance(images, tf.Tensor) and images.shape == (2, *input_size, 3)
assert isinstance(labels, list) and all(isinstance(elt, str) for elt in labels)
# File existence check
img_name, _ = ds.data[0]
move(os.path.join(ds.root, img_name), os.path.join(ds.root, "tmp_file"))
with pytest.raises(FileNotFoundError):
datasets.RecognitionDataset(mock_image_folder, mock_recognition_label)
move(os.path.join(ds.root, "tmp_file"), os.path.join(ds.root, img_name))
@pytest.mark.parametrize(
"use_polygons",
[False, True],
)
def test_ocrdataset(mock_ocrdataset, use_polygons):
input_size = (512, 512)
ds = datasets.OCRDataset(
*mock_ocrdataset,
img_transforms=Resize(input_size),
use_polygons=use_polygons,
)
assert len(ds) == 3
_validate_dataset(ds, input_size, is_polygons=use_polygons)
# File existence check
img_name, _ = ds.data[0]
move(os.path.join(ds.root, img_name), os.path.join(ds.root, "tmp_file"))
with pytest.raises(FileNotFoundError):
datasets.OCRDataset(*mock_ocrdataset)
move(os.path.join(ds.root, "tmp_file"), os.path.join(ds.root, img_name))
def test_charactergenerator():
input_size = (32, 32)
vocab = "abcdef"
ds = datasets.CharacterGenerator(
vocab=vocab,
num_samples=10,
cache_samples=True,
img_transforms=Resize(input_size),
)
assert len(ds) == 10
image, label = ds[0]
assert isinstance(image, tf.Tensor)
assert image.shape[:2] == input_size
assert image.dtype == tf.float32
assert isinstance(label, int) and label < len(vocab)
loader = DataLoader(ds, batch_size=2, collate_fn=ds.collate_fn)
images, targets = next(iter(loader))
assert isinstance(images, tf.Tensor) and images.shape == (2, *input_size, 3)
assert isinstance(targets, tf.Tensor) and targets.shape == (2,)
assert targets.dtype == tf.int32
def test_wordgenerator():
input_size = (32, 128)
wordlen_range = (1, 10)
vocab = "abcdef"
ds = datasets.WordGenerator(
vocab=vocab,
min_chars=wordlen_range[0],
max_chars=wordlen_range[1],
num_samples=10,
cache_samples=True,
img_transforms=Resize(input_size),
)
assert len(ds) == 10
image, target = ds[0]
assert isinstance(image, tf.Tensor)
assert image.shape[:2] == input_size
assert image.dtype == tf.float32
assert isinstance(target, str) and len(target) >= wordlen_range[0] and len(target) <= wordlen_range[1]
assert all(char in vocab for char in target)
loader = DataLoader(ds, batch_size=2, collate_fn=ds.collate_fn)
images, targets = next(iter(loader))
assert isinstance(images, tf.Tensor) and images.shape == (2, *input_size, 3)
assert isinstance(targets, list) and len(targets) == 2 and all(isinstance(t, str) for t in targets)
@pytest.mark.parametrize(
"input_size, num_samples, rotate",
[
[[512, 512], 3, True], # Actual set has 2700 training samples and 300 test samples
[[512, 512], 3, False],
],
)
def test_artefact_detection(input_size, num_samples, rotate, mock_doc_artefacts):
# monkeypatch the path to temporary dataset
datasets.DocArtefacts.URL = mock_doc_artefacts
datasets.DocArtefacts.SHA256 = None
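# Point the cache at the mock archive's parent directory so download=True resolves it locally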
ds = datasets.DocArtefacts(
train=True,
download=True,
img_transforms=Resize(input_size),
use_polygons=rotate,
cache_dir="/".join(mock_doc_artefacts.split("/")[:-2]),
cache_subdir=mock_doc_artefacts.split("/")[-2],
)
assert len(ds) == num_samples
assert repr(ds) == f"DocArtefacts(train={True})"
_validate_dataset(ds, input_size, class_indices=True, is_polygons=rotate)
# NOTE: the following datasets also support the recognition task
@pytest.mark.parametrize(
"input_size, num_samples, rotate, recognition",
[
[[512, 512], 3, True, False], # Actual set has 626 training samples and 360 test samples
[[512, 512], 3, False, False],
[[32, 128], 15, True, True], # recognition
[[32, 128], 15, False, True],
],
)
def test_sroie(input_size, num_samples, rotate, recognition, mock_sroie_dataset):
# monkeypatch the path to temporary dataset
datasets.SROIE.TRAIN = (mock_sroie_dataset, None)
ds = datasets.SROIE(
train=True,
download=True,
img_transforms=Resize(input_size),
use_polygons=rotate,
recognition_task=recognition,
cache_dir="/".join(mock_sroie_dataset.split("/")[:-2]),
cache_subdir=mock_sroie_dataset.split("/")[-2],
)
assert len(ds) == num_samples
assert repr(ds) == f"SROIE(train={True})"
if recognition:
_validate_dataset_recognition_part(ds, input_size)
else:
_validate_dataset(ds, input_size, is_polygons=rotate)
@pytest.mark.parametrize(
"input_size, num_samples, rotate, recognition",
[
[[512, 512], 5, True, False], # Actual set has 229 train and 233 test samples
[[512, 512], 5, False, False],
[[32, 128], 25, True, True], # recognition
[[32, 128], 25, False, True],
],
)
def test_ic13_dataset(input_size, num_samples, rotate, recognition, mock_ic13):
ds = datasets.IC13(
*mock_ic13,
img_transforms=Resize(input_size),
use_polygons=rotate,
recognition_task=recognition,
)
assert len(ds) == num_samples
if recognition:
_validate_dataset_recognition_part(ds, input_size)
else:
_validate_dataset(ds, input_size, is_polygons=rotate)
@pytest.mark.parametrize(
"input_size, num_samples, rotate, recognition",
[
[[512, 512], 3, True, False], # Actual set has 7149 train and 796 test samples
[[512, 512], 3, False, False],
[[32, 128], 5, True, True], # recognition
[[32, 128], 5, False, True],
],
)
def test_imgur5k_dataset(input_size, num_samples, rotate, recognition, mock_imgur5k):
ds = datasets.IMGUR5K(
*mock_imgur5k,
train=True,
img_transforms=Resize(input_size),
use_polygons=rotate,
recognition_task=recognition,
)
assert len(ds) == num_samples - 1  # one mock sample is held out by the 90/10 train/test split
assert repr(ds) == f"IMGUR5K(train={True})"
if recognition:
_validate_dataset_recognition_part(ds, input_size)
else:
_validate_dataset(ds, input_size, is_polygons=rotate)
@pytest.mark.parametrize(
"input_size, num_samples, rotate, recognition",
[
[[32, 128], 3, True, False], # Actual set has 33402 training samples and 13068 test samples
[[32, 128], 3, False, False],
[[32, 128], 12, True, True], # recognition
[[32, 128], 12, False, True],
],
)
def test_svhn(input_size, num_samples, rotate, recognition, mock_svhn_dataset):
# monkeypatch the path to temporary dataset
datasets.SVHN.TRAIN = (mock_svhn_dataset, None, "svhn_train.tar")
ds = datasets.SVHN(
train=True,
download=True,
img_transforms=Resize(input_size),
use_polygons=rotate,
recognition_task=recognition,
cache_dir="/".join(mock_svhn_dataset.split("/")[:-2]),
cache_subdir=mock_svhn_dataset.split("/")[-2],
)
assert len(ds) == num_samples
assert repr(ds) == f"SVHN(train={True})"
if recognition:
_validate_dataset_recognition_part(ds, input_size)
else:
_validate_dataset(ds, input_size, is_polygons=rotate)
@pytest.mark.parametrize(
"input_size, num_samples, rotate, recognition",
[
[[512, 512], 3, True, False], # Actual set has 149 training samples and 50 test samples
[[512, 512], 3, False, False],
[[32, 128], 9, True, True], # recognition
[[32, 128], 9, False, True],
],
)
def test_funsd(input_size, num_samples, rotate, recognition, mock_funsd_dataset):
# monkeypatch the path to temporary dataset
datasets.FUNSD.URL = mock_funsd_dataset
datasets.FUNSD.SHA256 = None
datasets.FUNSD.FILE_NAME = "funsd.zip"
ds = datasets.FUNSD(
train=True,
download=True,
img_transforms=Resize(input_size),
use_polygons=rotate,
recognition_task=recognition,
cache_dir="/".join(mock_funsd_dataset.split("/")[:-2]),
cache_subdir=mock_funsd_dataset.split("/")[-2],
)
assert len(ds) == num_samples
assert repr(ds) == f"FUNSD(train={True})"
if recognition:
_validate_dataset_recognition_part(ds, input_size)
else:
_validate_dataset(ds, input_size, is_polygons=rotate)
@pytest.mark.parametrize(
"input_size, num_samples, rotate, recognition",
[
[[512, 512], 3, True, False], # Actual set has 800 training samples and 100 test samples
[[512, 512], 3, False, False],
[[32, 128], 9, True, True], # recognition
[[32, 128], 9, False, True],
],
)
def test_cord(input_size, num_samples, rotate, recognition, mock_cord_dataset):
# monkeypatch the path to temporary dataset
datasets.CORD.TRAIN = (mock_cord_dataset, None)
ds = datasets.CORD(
train=True,
download=True,
img_transforms=Resize(input_size),
use_polygons=rotate,
recognition_task=recognition,
cache_dir="/".join(mock_cord_dataset.split("/")[:-2]),
cache_subdir=mock_cord_dataset.split("/")[-2],
)
assert len(ds) == num_samples
assert repr(ds) == f"CORD(train={True})"
if recognition:
_validate_dataset_recognition_part(ds, input_size)
else:
_validate_dataset(ds, input_size, is_polygons=rotate)
@pytest.mark.parametrize(
"input_size, num_samples, rotate, recognition",
[
[[512, 512], 2, True, False], # Actual set has 772875 training samples and 85875 test samples
[[512, 512], 2, False, False],
[[32, 128], 10, True, True], # recognition
[[32, 128], 10, False, True],
],
)
def test_synthtext(input_size, num_samples, rotate, recognition, mock_synthtext_dataset):
# monkeypatch the path to temporary dataset
datasets.SynthText.URL = mock_synthtext_dataset
datasets.SynthText.SHA256 = None
ds = datasets.SynthText(
train=True,
download=True,
img_transforms=Resize(input_size),
use_polygons=rotate,
recognition_task=recognition,
cache_dir="/".join(mock_synthtext_dataset.split("/")[:-2]),
cache_subdir=mock_synthtext_dataset.split("/")[-2],
)
assert len(ds) == num_samples
assert repr(ds) == f"SynthText(train={True})"
if recognition:
_validate_dataset_recognition_part(ds, input_size)
else:
_validate_dataset(ds, input_size, is_polygons=rotate)
@pytest.mark.parametrize(
"input_size, num_samples, rotate, recognition",
[
[[32, 128], 1, True, False], # Actual set has 2000 training samples and 3000 test samples
[[32, 128], 1, False, False],
[[32, 128], 1, True, True], # recognition
[[32, 128], 1, False, True],
],
)
def test_iiit5k(input_size, num_samples, rotate, recognition, mock_iiit5k_dataset):
# monkeypatch the path to temporary dataset
datasets.IIIT5K.URL = mock_iiit5k_dataset
datasets.IIIT5K.SHA256 = None
ds = datasets.IIIT5K(
train=True,
download=True,
img_transforms=Resize(input_size),
use_polygons=rotate,
recognition_task=recognition,
cache_dir="/".join(mock_iiit5k_dataset.split("/")[:-2]),
cache_subdir=mock_iiit5k_dataset.split("/")[-2],
)
assert len(ds) == num_samples
assert repr(ds) == f"IIIT5K(train={True})"
if recognition:
_validate_dataset_recognition_part(ds, input_size, batch_size=1)
else:
_validate_dataset(ds, input_size, batch_size=1, is_polygons=rotate)
@pytest.mark.parametrize(
"input_size, num_samples, rotate, recognition",
[
[[512, 512], 3, True, False], # Actual set has 100 training samples and 249 test samples
[[512, 512], 3, False, False],
[[32, 128], 3, True, True], # recognition
[[32, 128], 3, False, True],
],
)
def test_svt(input_size, num_samples, rotate, recognition, mock_svt_dataset):
# monkeypatch the path to temporary dataset
datasets.SVT.URL = mock_svt_dataset
datasets.SVT.SHA256 = None
ds = datasets.SVT(
train=True,
download=True,
img_transforms=Resize(input_size),
use_polygons=rotate,
recognition_task=recognition,
cache_dir="/".join(mock_svt_dataset.split("/")[:-2]),
cache_subdir=mock_svt_dataset.split("/")[-2],
)
assert len(ds) == num_samples
assert repr(ds) == f"SVT(train={True})"
if recognition:
_validate_dataset_recognition_part(ds, input_size)
else:
_validate_dataset(ds, input_size, is_polygons=rotate)
@pytest.mark.parametrize(
"input_size, num_samples, rotate, recognition",
[
[[512, 512], 3, True, False], # Actual set has 246 training samples and 249 test samples
[[512, 512], 3, False, False],
[[32, 128], 3, True, True], # recognition
[[32, 128], 3, False, True],
],
)
def test_ic03(input_size, num_samples, rotate, recognition, mock_ic03_dataset):
# monkeypatch the path to temporary dataset
datasets.IC03.TRAIN = (mock_ic03_dataset, None, "ic03_train.zip")
ds = datasets.IC03(
train=True,
download=True,
img_transforms=Resize(input_size),
use_polygons=rotate,
recognition_task=recognition,
cache_dir="/".join(mock_ic03_dataset.split("/")[:-2]),
cache_subdir=mock_ic03_dataset.split("/")[-2],
)
assert len(ds) == num_samples
assert repr(ds) == f"IC03(train={True})"
if recognition:
_validate_dataset_recognition_part(ds, input_size)
else:
_validate_dataset(ds, input_size, is_polygons=rotate)
# NOTE: the following datasets are recognition-task only
def test_mjsynth_dataset(mock_mjsynth_dataset):
input_size = (32, 128)
ds = datasets.MJSynth(
*mock_mjsynth_dataset,
img_transforms=Resize(input_size, preserve_aspect_ratio=True),
)
assert len(ds) == 4 # Actual set has 7581382 train and 1337891 test samples
assert repr(ds) == f"MJSynth(train={True})"
_validate_dataset_recognition_part(ds, input_size)
|
import os
import tempfile
import numpy as np
import onnxruntime
import pytest
import tensorflow as tf
from doctr.file_utils import CLASS_NAME
from doctr.io import DocumentFile
from doctr.models import detection
from doctr.models.detection._utils import dilate, erode
from doctr.models.detection.predictor import DetectionPredictor
from doctr.models.preprocessor import PreProcessor
from doctr.models.utils import export_model_to_onnx
@pytest.mark.parametrize(
"arch_name, input_shape, output_size, out_prob",
[
["db_resnet50", (512, 512, 3), (512, 512, 1), True],
["db_mobilenet_v3_large", (512, 512, 3), (512, 512, 1), True],
["linknet_resnet18", (512, 512, 3), (512, 512, 1), False],
["linknet_resnet34", (512, 512, 3), (512, 512, 1), False],
["linknet_resnet50", (512, 512, 3), (512, 512, 1), False],
],
)
def test_detection_models(arch_name, input_shape, output_size, out_prob):
batch_size = 2
tf.keras.backend.clear_session()
model = detection.__dict__[arch_name](pretrained=True, input_shape=input_shape)
assert isinstance(model, tf.keras.Model)
input_tensor = tf.random.uniform(shape=[batch_size, *input_shape], minval=0, maxval=1)
target = [
{CLASS_NAME: np.array([[0.5, 0.5, 1, 1], [0.5, 0.5, 0.8, 0.8]], dtype=np.float32)},
{CLASS_NAME: np.array([[0.5, 0.5, 1, 1], [0.5, 0.5, 0.8, 0.9]], dtype=np.float32)},
]
# test training model
out = model(input_tensor, target, return_model_output=True, return_preds=True, training=True)
assert isinstance(out, dict)
assert len(out) == 3
# Check proba map
assert isinstance(out["out_map"], tf.Tensor)
assert out["out_map"].dtype == tf.float32
seg_map = out["out_map"].numpy()
assert seg_map.shape == (batch_size, *output_size)
if out_prob:
assert np.all(np.logical_and(seg_map >= 0, seg_map <= 1))
# Check boxes
for boxes_dict in out["preds"]:
for boxes in boxes_dict.values():
assert boxes.shape[1] == 5
assert np.all(boxes[:, :2] < boxes[:, 2:4])
assert np.all(boxes[:, :4] >= 0) and np.all(boxes[:, :4] <= 1)
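    # From the checks above, each box row is presumably
    # (xmin, ymin, xmax, ymax, confidence) in relative [0, 1] coordinates.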
# Check loss
assert isinstance(out["loss"], tf.Tensor)
# Target checks
target = [
{CLASS_NAME: np.array([[0, 0, 1, 1]], dtype=np.uint8)},
{CLASS_NAME: np.array([[0, 0, 1, 1]], dtype=np.uint8)},
]
with pytest.raises(AssertionError):
out = model(input_tensor, target, training=True)
target = [
{CLASS_NAME: np.array([[0, 0, 1.5, 1.5]], dtype=np.float32)},
{CLASS_NAME: np.array([[-0.2, -0.3, 1, 1]], dtype=np.float32)},
]
with pytest.raises(ValueError):
out = model(input_tensor, target, training=True)
# Check the rotated case
target = [
{CLASS_NAME: np.array([[0.75, 0.75, 0.5, 0.5, 0], [0.65, 0.65, 0.3, 0.3, 0]], dtype=np.float32)},
{CLASS_NAME: np.array([[0.75, 0.75, 0.5, 0.5, 0], [0.65, 0.7, 0.3, 0.4, 0]], dtype=np.float32)},
]
loss = model(input_tensor, target, training=True)["loss"]
    # The rotated-target loss should stay within ~25% relative difference of the
    # straight-target loss computed earlier (abs() guards against a trivially
    # passing negative difference).
    assert isinstance(loss, tf.Tensor) and abs(((loss - out["loss"]) / loss).numpy()) < 25e-2
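# NOTE: despite the test_ prefix, the next two functions are session-scoped
# fixtures — pytest does not collect them as tests; they exercise the predictor
# once and return it for reuse by dependent tests.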
@pytest.fixture(scope="session")
def test_detectionpredictor(mock_pdf): # noqa: F811
batch_size = 4
predictor = DetectionPredictor(
PreProcessor(output_size=(512, 512), batch_size=batch_size), detection.db_resnet50(input_shape=(512, 512, 3))
)
pages = DocumentFile.from_pdf(mock_pdf).as_images()
out = predictor(pages)
# The input PDF has 2 pages
assert len(out) == 2
# Dimension check
with pytest.raises(ValueError):
input_page = (255 * np.random.rand(1, 256, 512, 3)).astype(np.uint8)
_ = predictor([input_page])
return predictor
@pytest.fixture(scope="session")
def test_rotated_detectionpredictor(mock_pdf): # noqa: F811
batch_size = 4
predictor = DetectionPredictor(
PreProcessor(output_size=(512, 512), batch_size=batch_size),
detection.db_resnet50(assume_straight_pages=False, input_shape=(512, 512, 3)),
)
pages = DocumentFile.from_pdf(mock_pdf).as_images()
out = predictor(pages)
# The input PDF has 2 pages
assert len(out) == 2
# Dimension check
with pytest.raises(ValueError):
input_page = (255 * np.random.rand(1, 256, 512, 3)).astype(np.uint8)
_ = predictor([input_page])
return predictor
@pytest.mark.parametrize(
"arch_name",
[
"db_resnet50",
"db_mobilenet_v3_large",
"linknet_resnet18",
],
)
def test_detection_zoo(arch_name):
# Model
tf.keras.backend.clear_session()
predictor = detection.zoo.detection_predictor(arch_name, pretrained=False)
# object check
assert isinstance(predictor, DetectionPredictor)
input_tensor = tf.random.uniform(shape=[2, 1024, 1024, 3], minval=0, maxval=1)
out = predictor(input_tensor)
assert all(isinstance(boxes, dict) for boxes in out)
assert all(isinstance(boxes[CLASS_NAME], np.ndarray) and boxes[CLASS_NAME].shape[1] == 5 for boxes in out)
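    # Each dict in `out` maps a class name to an (N, 5) array; given the shape
    # check above, rows are presumably (xmin, ymin, xmax, ymax, confidence).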
def test_detection_zoo_error():
with pytest.raises(ValueError):
_ = detection.zoo.detection_predictor("my_fancy_model", pretrained=False)
def test_erode():
x = np.zeros((1, 3, 3, 1), dtype=np.float32)
x[:, 1, 1] = 1
x = tf.convert_to_tensor(x)
expected = tf.zeros((1, 3, 3, 1))
out = erode(x, 3)
assert tf.math.reduce_all(out == expected)
def test_dilate():
x = np.zeros((1, 3, 3, 1), dtype=np.float32)
x[:, 1, 1] = 1
x = tf.convert_to_tensor(x)
expected = tf.ones((1, 3, 3, 1))
out = dilate(x, 3)
assert tf.math.reduce_all(out == expected)
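# Hedged sketch (an assumption, not necessarily doctr's implementation): the
# morphological ops tested above can be expressed with pooling in TensorFlow:
#   dilated = tf.nn.max_pool2d(x, ksize=3, strides=1, padding="SAME")
#   eroded = -tf.nn.max_pool2d(-x, ksize=3, strides=1, padding="SAME")  # min-pool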
@pytest.mark.parametrize(
"arch_name, input_shape, output_size",
[
["db_resnet50", (512, 512, 3), (512, 512, 1)],
["db_mobilenet_v3_large", (512, 512, 3), (512, 512, 1)],
["linknet_resnet18", (1024, 1024, 3), (1024, 1024, 1)],
["linknet_resnet34", (1024, 1024, 3), (1024, 1024, 1)],
["linknet_resnet50", (512, 512, 3), (512, 512, 1)],
],
)
def test_models_onnx_export(arch_name, input_shape, output_size):
# Model
batch_size = 2
tf.keras.backend.clear_session()
model = detection.__dict__[arch_name](pretrained=True, exportable=True, input_shape=input_shape)
    # first dimension is None so the exported model accepts a dynamic batch size
dummy_input = [tf.TensorSpec([None, *input_shape], tf.float32, name="input")]
np_dummy_input = np.random.rand(batch_size, *input_shape).astype(np.float32)
with tempfile.TemporaryDirectory() as tmpdir:
# Export
model_path, output = export_model_to_onnx(
model, model_name=os.path.join(tmpdir, "model"), dummy_input=dummy_input
)
assert os.path.exists(model_path)
# Inference
ort_session = onnxruntime.InferenceSession(
os.path.join(tmpdir, "model.onnx"), providers=["CPUExecutionProvider"]
)
ort_outs = ort_session.run(output, {"input": np_dummy_input})
assert isinstance(ort_outs, list) and len(ort_outs) == 1
assert ort_outs[0].shape == (batch_size, *output_size)
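        # The exported file could equally be consumed by any other ONNX Runtime
        # session, e.g. sess.run(None, {"input": np_dummy_input}) to fetch all outputs.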
|
import json
import os
import tempfile
import pytest
import tensorflow as tf
from doctr import models
from doctr.models.factory import _save_model_and_config_for_hf_hub, from_hub, push_to_hf_hub
def test_push_to_hf_hub():
model = models.classification.resnet18(pretrained=False)
with pytest.raises(ValueError):
# run_config and/or arch must be specified
push_to_hf_hub(model, model_name="test", task="classification")
with pytest.raises(ValueError):
# task must be one of classification, detection, recognition, obj_detection
push_to_hf_hub(model, model_name="test", task="invalid_task", arch="mobilenet_v3_small")
with pytest.raises(ValueError):
# arch not in available architectures for task
push_to_hf_hub(model, model_name="test", task="detection", arch="crnn_mobilenet_v3_large")
@pytest.mark.parametrize(
"arch_name, task_name, dummy_model_id",
[
["vgg16_bn_r", "classification", "Felix92/doctr-dummy-tf-vgg16-bn-r"],
["resnet18", "classification", "Felix92/doctr-dummy-tf-resnet18"],
["resnet31", "classification", "Felix92/doctr-dummy-tf-resnet31"],
["resnet34", "classification", "Felix92/doctr-dummy-tf-resnet34"],
["resnet34_wide", "classification", "Felix92/doctr-dummy-tf-resnet34-wide"],
["resnet50", "classification", "Felix92/doctr-dummy-tf-resnet50"],
["magc_resnet31", "classification", "Felix92/doctr-dummy-tf-magc-resnet31"],
["mobilenet_v3_large", "classification", "Felix92/doctr-dummy-tf-mobilenet-v3-large"],
["vit_b", "classification", "Felix92/doctr-dummy-tf-vit-b"],
["db_resnet50", "detection", "Felix92/doctr-dummy-tf-db-resnet50"],
["db_mobilenet_v3_large", "detection", "Felix92/doctr-dummy-tf-db-mobilenet-v3-large"],
["linknet_resnet18", "detection", "Felix92/doctr-dummy-tf-linknet-resnet18"],
["linknet_resnet18_rotation", "detection", "Felix92/doctr-dummy-tf-linknet-resnet18-rotation"],
["linknet_resnet34", "detection", "Felix92/doctr-dummy-tf-linknet-resnet34"],
["linknet_resnet50", "detection", "Felix92/doctr-dummy-tf-linknet-resnet50"],
["crnn_vgg16_bn", "recognition", "Felix92/doctr-dummy-tf-crnn-vgg16-bn"],
["crnn_mobilenet_v3_large", "recognition", "Felix92/doctr-dummy-tf-crnn-mobilenet-v3-large"],
["sar_resnet31", "recognition", "Felix92/doctr-dummy-tf-sar-resnet31"],
["master", "recognition", "Felix92/doctr-dummy-tf-master"],
["vitstr_small", "recognition", "Felix92/doctr-dummy-tf-vitstr-small"],
],
)
def test_models_for_hub(arch_name, task_name, dummy_model_id, tmpdir):
with tempfile.TemporaryDirectory() as tmp_dir:
tf.keras.backend.clear_session()
model = models.__dict__[task_name].__dict__[arch_name](pretrained=True)
_save_model_and_config_for_hf_hub(model, arch=arch_name, task=task_name, save_dir=tmp_dir)
assert hasattr(model, "cfg")
assert len(os.listdir(tmp_dir)) == 2
assert os.path.exists(tmp_dir + "/tf_model")
assert len(os.listdir(tmp_dir + "/tf_model")) == 3
assert os.path.exists(tmp_dir + "/config.json")
        with open(os.path.join(tmp_dir, "config.json")) as f:
            tmp_config = json.load(f)
assert arch_name == tmp_config["arch"]
assert task_name == tmp_config["task"]
assert all(key in model.cfg.keys() for key in tmp_config.keys())
# test from hub
tf.keras.backend.clear_session()
hub_model = from_hub(repo_id=dummy_model_id)
assert isinstance(hub_model, type(model))
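        # from_hub presumably rebuilds the architecture from the hosted config,
        # which is what the type equality check above relies on.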
|
import numpy as np
import pytest
import tensorflow as tf
from doctr.io import decode_img_as_tensor, read_img_as_tensor, tensor_from_numpy
def test_read_img_as_tensor(mock_image_path):
img = read_img_as_tensor(mock_image_path)
assert isinstance(img, tf.Tensor)
assert img.dtype == tf.float32
assert img.shape == (900, 1200, 3)
img = read_img_as_tensor(mock_image_path, dtype=tf.float16)
assert img.dtype == tf.float16
img = read_img_as_tensor(mock_image_path, dtype=tf.uint8)
assert img.dtype == tf.uint8
def test_decode_img_as_tensor(mock_image_stream):
img = decode_img_as_tensor(mock_image_stream)
assert isinstance(img, tf.Tensor)
assert img.dtype == tf.float32
assert img.shape == (900, 1200, 3)
img = decode_img_as_tensor(mock_image_stream, dtype=tf.float16)
assert img.dtype == tf.float16
img = decode_img_as_tensor(mock_image_stream, dtype=tf.uint8)
assert img.dtype == tf.uint8
def test_tensor_from_numpy(mock_image_stream):
with pytest.raises(ValueError):
tensor_from_numpy(np.zeros((256, 256, 3)), tf.int64)
out = tensor_from_numpy(np.zeros((256, 256, 3), dtype=np.uint8))
assert isinstance(out, tf.Tensor)
assert out.dtype == tf.float32
assert out.shape == (256, 256, 3)
out = tensor_from_numpy(np.zeros((256, 256, 3), dtype=np.uint8), dtype=tf.float16)
assert out.dtype == tf.float16
out = tensor_from_numpy(np.zeros((256, 256, 3), dtype=np.uint8), dtype=tf.uint8)
assert out.dtype == tf.uint8
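# NOTE: the zero arrays above cannot distinguish value scaling; presumably a
# uint8 input converted to a float dtype is rescaled to [0, 1] (matching the
# read/decode helpers above), while dtype=tf.uint8 keeps raw 0-255 values.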
|