repo_name
stringlengths 6
130
| hexsha
list | file_path
list | code
list | apis
list | possible_versions
list |
---|---|---|---|---|---|
JustinZhengBC/pandas
|
[
"1f02bf240c3d0d3da338af868d056bfc169b28c2"
] |
[
"pandas/tests/scalar/timedelta/test_timedelta.py"
] |
[
"\"\"\" test the scalar Timedelta \"\"\"\nfrom datetime import timedelta\n\nimport numpy as np\nimport pytest\n\nfrom pandas._libs.tslib import NaT, iNaT\nimport pandas.compat as compat\n\nimport pandas as pd\nfrom pandas import (\n Series, Timedelta, TimedeltaIndex, timedelta_range, to_timedelta)\nfrom pandas.core.tools.timedeltas import _coerce_scalar_to_timedelta_type as ct\nimport pandas.util.testing as tm\n\n\nclass TestTimedeltaArithmetic(object):\n\n def test_arithmetic_overflow(self):\n with pytest.raises(OverflowError):\n pd.Timestamp('1700-01-01') + pd.Timedelta(13 * 19999, unit='D')\n\n with pytest.raises(OverflowError):\n pd.Timestamp('1700-01-01') + timedelta(days=13 * 19999)\n\n def test_array_timedelta_floordiv(self):\n # https://github.com/pandas-dev/pandas/issues/19761\n ints = pd.date_range('2012-10-08', periods=4, freq='D').view('i8')\n msg = r\"Use 'array // timedelta.value'\"\n with tm.assert_produces_warning(FutureWarning) as m:\n result = ints // pd.Timedelta(1, unit='s')\n\n assert msg in str(m[0].message)\n expected = np.array([1349654400, 1349740800, 1349827200, 1349913600],\n dtype='i8')\n tm.assert_numpy_array_equal(result, expected)\n\n def test_ops_error_str(self):\n # GH 13624\n td = Timedelta('1 day')\n\n for left, right in [(td, 'a'), ('a', td)]:\n\n with pytest.raises(TypeError):\n left + right\n\n # GH 20829: python 2 comparison naturally does not raise TypeError\n if compat.PY3:\n with pytest.raises(TypeError):\n left > right\n\n assert not left == right\n assert left != right\n\n def test_ops_notimplemented(self):\n class Other(object):\n pass\n\n other = Other()\n\n td = Timedelta('1 day')\n assert td.__add__(other) is NotImplemented\n assert td.__sub__(other) is NotImplemented\n assert td.__truediv__(other) is NotImplemented\n assert td.__mul__(other) is NotImplemented\n assert td.__floordiv__(other) is NotImplemented\n\n def test_unary_ops(self):\n td = Timedelta(10, unit='d')\n\n # __neg__, __pos__\n assert -td == 
Timedelta(-10, unit='d')\n assert -td == Timedelta('-10d')\n assert +td == Timedelta(10, unit='d')\n\n # __abs__, __abs__(__neg__)\n assert abs(td) == td\n assert abs(-td) == td\n assert abs(-td) == Timedelta('10d')\n\n\nclass TestTimedeltaComparison(object):\n def test_comparison_object_array(self):\n # analogous to GH#15183\n td = Timedelta('2 days')\n other = Timedelta('3 hours')\n\n arr = np.array([other, td], dtype=object)\n res = arr == td\n expected = np.array([False, True], dtype=bool)\n assert (res == expected).all()\n\n # 2D case\n arr = np.array([[other, td],\n [td, other]],\n dtype=object)\n res = arr != td\n expected = np.array([[True, False], [False, True]], dtype=bool)\n assert res.shape == expected.shape\n assert (res == expected).all()\n\n def test_compare_timedelta_ndarray(self):\n # GH11835\n periods = [Timedelta('0 days 01:00:00'), Timedelta('0 days 01:00:00')]\n arr = np.array(periods)\n result = arr[0] > arr\n expected = np.array([False, False])\n tm.assert_numpy_array_equal(result, expected)\n\n def test_compare_custom_object(self):\n \"\"\"Make sure non supported operations on Timedelta returns NonImplemented\n and yields to other operand (GH20829).\"\"\"\n class CustomClass(object):\n\n def __init__(self, cmp_result=None):\n self.cmp_result = cmp_result\n\n def generic_result(self):\n if self.cmp_result is None:\n return NotImplemented\n else:\n return self.cmp_result\n\n def __eq__(self, other):\n return self.generic_result()\n\n def __gt__(self, other):\n return self.generic_result()\n\n t = Timedelta('1s')\n\n assert not (t == \"string\")\n assert not (t == 1)\n assert not (t == CustomClass())\n assert not (t == CustomClass(cmp_result=False))\n\n assert t < CustomClass(cmp_result=True)\n assert not (t < CustomClass(cmp_result=False))\n\n assert t == CustomClass(cmp_result=True)\n\n @pytest.mark.skipif(compat.PY2,\n reason=\"python 2 does not raise TypeError for \\\n comparisons of different types\")\n @pytest.mark.parametrize(\"val\", 
[\n \"string\", 1])\n def test_compare_unknown_type(self, val):\n # GH20829\n t = Timedelta('1s')\n with pytest.raises(TypeError):\n t >= val\n with pytest.raises(TypeError):\n t > val\n with pytest.raises(TypeError):\n t <= val\n with pytest.raises(TypeError):\n t < val\n\n\nclass TestTimedeltas(object):\n\n @pytest.mark.parametrize(\"unit, value, expected\", [\n ('us', 9.999, 9999), ('ms', 9.999999, 9999999),\n ('s', 9.999999999, 9999999999)])\n def test_rounding_on_int_unit_construction(self, unit, value, expected):\n # GH 12690\n result = Timedelta(value, unit=unit)\n assert result.value == expected\n result = Timedelta(str(value) + unit)\n assert result.value == expected\n\n def test_total_seconds_scalar(self):\n # see gh-10939\n rng = Timedelta('1 days, 10:11:12.100123456')\n expt = 1 * 86400 + 10 * 3600 + 11 * 60 + 12 + 100123456. / 1e9\n tm.assert_almost_equal(rng.total_seconds(), expt)\n\n rng = Timedelta(np.nan)\n assert np.isnan(rng.total_seconds())\n\n def test_conversion(self):\n\n for td in [Timedelta(10, unit='d'),\n Timedelta('1 days, 10:11:12.012345')]:\n pydt = td.to_pytimedelta()\n assert td == Timedelta(pydt)\n assert td == pydt\n assert (isinstance(pydt, timedelta) and not isinstance(\n pydt, Timedelta))\n\n assert td == np.timedelta64(td.value, 'ns')\n td64 = td.to_timedelta64()\n\n assert td64 == np.timedelta64(td.value, 'ns')\n assert td == td64\n\n assert isinstance(td64, np.timedelta64)\n\n # this is NOT equal and cannot be roundtriped (because of the nanos)\n td = Timedelta('1 days, 10:11:12.012345678')\n assert td != td.to_pytimedelta()\n\n def test_freq_conversion(self):\n\n # truediv\n td = Timedelta('1 days 2 hours 3 ns')\n result = td / np.timedelta64(1, 'D')\n assert result == td.value / float(86400 * 1e9)\n result = td / np.timedelta64(1, 's')\n assert result == td.value / float(1e9)\n result = td / np.timedelta64(1, 'ns')\n assert result == td.value\n\n # floordiv\n td = Timedelta('1 days 2 hours 3 ns')\n result = td // 
np.timedelta64(1, 'D')\n assert result == 1\n result = td // np.timedelta64(1, 's')\n assert result == 93600\n result = td // np.timedelta64(1, 'ns')\n assert result == td.value\n\n def test_fields(self):\n def check(value):\n # that we are int/long like\n assert isinstance(value, (int, compat.long))\n\n # compat to datetime.timedelta\n rng = to_timedelta('1 days, 10:11:12')\n assert rng.days == 1\n assert rng.seconds == 10 * 3600 + 11 * 60 + 12\n assert rng.microseconds == 0\n assert rng.nanoseconds == 0\n\n pytest.raises(AttributeError, lambda: rng.hours)\n pytest.raises(AttributeError, lambda: rng.minutes)\n pytest.raises(AttributeError, lambda: rng.milliseconds)\n\n # GH 10050\n check(rng.days)\n check(rng.seconds)\n check(rng.microseconds)\n check(rng.nanoseconds)\n\n td = Timedelta('-1 days, 10:11:12')\n assert abs(td) == Timedelta('13:48:48')\n assert str(td) == \"-1 days +10:11:12\"\n assert -td == Timedelta('0 days 13:48:48')\n assert -Timedelta('-1 days, 10:11:12').value == 49728000000000\n assert Timedelta('-1 days, 10:11:12').value == -49728000000000\n\n rng = to_timedelta('-1 days, 10:11:12.100123456')\n assert rng.days == -1\n assert rng.seconds == 10 * 3600 + 11 * 60 + 12\n assert rng.microseconds == 100 * 1000 + 123\n assert rng.nanoseconds == 456\n pytest.raises(AttributeError, lambda: rng.hours)\n pytest.raises(AttributeError, lambda: rng.minutes)\n pytest.raises(AttributeError, lambda: rng.milliseconds)\n\n # components\n tup = pd.to_timedelta(-1, 'us').components\n assert tup.days == -1\n assert tup.hours == 23\n assert tup.minutes == 59\n assert tup.seconds == 59\n assert tup.milliseconds == 999\n assert tup.microseconds == 999\n assert tup.nanoseconds == 0\n\n # GH 10050\n check(tup.days)\n check(tup.hours)\n check(tup.minutes)\n check(tup.seconds)\n check(tup.milliseconds)\n check(tup.microseconds)\n check(tup.nanoseconds)\n\n tup = Timedelta('-1 days 1 us').components\n assert tup.days == -2\n assert tup.hours == 23\n assert tup.minutes == 
59\n assert tup.seconds == 59\n assert tup.milliseconds == 999\n assert tup.microseconds == 999\n assert tup.nanoseconds == 0\n\n def test_iso_conversion(self):\n # GH #21877\n expected = Timedelta(1, unit='s')\n assert to_timedelta('P0DT0H0M1S') == expected\n\n def test_nat_converters(self):\n assert to_timedelta('nat', box=False).astype('int64') == iNaT\n assert to_timedelta('nan', box=False).astype('int64') == iNaT\n\n @pytest.mark.parametrize('units, np_unit',\n [(['Y', 'y'], 'Y'),\n (['M'], 'M'),\n (['W', 'w'], 'W'),\n (['D', 'd', 'days', 'day', 'Days', 'Day'], 'D'),\n (['m', 'minute', 'min', 'minutes', 't',\n 'Minute', 'Min', 'Minutes', 'T'], 'm'),\n (['s', 'seconds', 'sec', 'second',\n 'S', 'Seconds', 'Sec', 'Second'], 's'),\n (['ms', 'milliseconds', 'millisecond', 'milli',\n 'millis', 'l', 'MS', 'Milliseconds',\n 'Millisecond', 'Milli', 'Millis', 'L'], 'ms'),\n (['us', 'microseconds', 'microsecond', 'micro',\n 'micros', 'u', 'US', 'Microseconds',\n 'Microsecond', 'Micro', 'Micros', 'U'], 'us'),\n (['ns', 'nanoseconds', 'nanosecond', 'nano',\n 'nanos', 'n', 'NS', 'Nanoseconds',\n 'Nanosecond', 'Nano', 'Nanos', 'N'], 'ns')])\n @pytest.mark.parametrize('wrapper', [np.array, list, pd.Index])\n def test_unit_parser(self, units, np_unit, wrapper):\n # validate all units, GH 6855, GH 21762\n for unit in units:\n # array-likes\n expected = TimedeltaIndex([np.timedelta64(i, np_unit)\n for i in np.arange(5).tolist()])\n result = to_timedelta(wrapper(range(5)), unit=unit)\n tm.assert_index_equal(result, expected)\n result = TimedeltaIndex(wrapper(range(5)), unit=unit)\n tm.assert_index_equal(result, expected)\n\n if unit == 'M':\n # M is treated as minutes in string repr\n expected = TimedeltaIndex([np.timedelta64(i, 'm')\n for i in np.arange(5).tolist()])\n\n str_repr = ['{}{}'.format(x, unit) for x in np.arange(5)]\n result = to_timedelta(wrapper(str_repr))\n tm.assert_index_equal(result, expected)\n result = TimedeltaIndex(wrapper(str_repr))\n 
tm.assert_index_equal(result, expected)\n\n # scalar\n expected = Timedelta(np.timedelta64(2, np_unit).astype(\n 'timedelta64[ns]'))\n\n result = to_timedelta(2, unit=unit)\n assert result == expected\n result = Timedelta(2, unit=unit)\n assert result == expected\n\n if unit == 'M':\n expected = Timedelta(np.timedelta64(2, 'm').astype(\n 'timedelta64[ns]'))\n\n result = to_timedelta('2{}'.format(unit))\n assert result == expected\n result = Timedelta('2{}'.format(unit))\n assert result == expected\n\n def test_numeric_conversions(self):\n assert ct(0) == np.timedelta64(0, 'ns')\n assert ct(10) == np.timedelta64(10, 'ns')\n assert ct(10, unit='ns') == np.timedelta64(10, 'ns').astype('m8[ns]')\n\n assert ct(10, unit='us') == np.timedelta64(10, 'us').astype('m8[ns]')\n assert ct(10, unit='ms') == np.timedelta64(10, 'ms').astype('m8[ns]')\n assert ct(10, unit='s') == np.timedelta64(10, 's').astype('m8[ns]')\n assert ct(10, unit='d') == np.timedelta64(10, 'D').astype('m8[ns]')\n\n def test_timedelta_conversions(self):\n assert (ct(timedelta(seconds=1)) ==\n np.timedelta64(1, 's').astype('m8[ns]'))\n assert (ct(timedelta(microseconds=1)) ==\n np.timedelta64(1, 'us').astype('m8[ns]'))\n assert (ct(timedelta(days=1)) ==\n np.timedelta64(1, 'D').astype('m8[ns]'))\n\n def test_round(self):\n\n t1 = Timedelta('1 days 02:34:56.789123456')\n t2 = Timedelta('-1 days 02:34:56.789123456')\n\n for (freq, s1, s2) in [('N', t1, t2),\n ('U', Timedelta('1 days 02:34:56.789123000'),\n Timedelta('-1 days 02:34:56.789123000')),\n ('L', Timedelta('1 days 02:34:56.789000000'),\n Timedelta('-1 days 02:34:56.789000000')),\n ('S', Timedelta('1 days 02:34:57'),\n Timedelta('-1 days 02:34:57')),\n ('2S', Timedelta('1 days 02:34:56'),\n Timedelta('-1 days 02:34:56')),\n ('5S', Timedelta('1 days 02:34:55'),\n Timedelta('-1 days 02:34:55')),\n ('T', Timedelta('1 days 02:35:00'),\n Timedelta('-1 days 02:35:00')),\n ('12T', Timedelta('1 days 02:36:00'),\n Timedelta('-1 days 02:36:00')),\n ('H', 
Timedelta('1 days 03:00:00'),\n Timedelta('-1 days 03:00:00')),\n ('d', Timedelta('1 days'),\n Timedelta('-1 days'))]:\n r1 = t1.round(freq)\n assert r1 == s1\n r2 = t2.round(freq)\n assert r2 == s2\n\n # invalid\n for freq in ['Y', 'M', 'foobar']:\n pytest.raises(ValueError, lambda: t1.round(freq))\n\n t1 = timedelta_range('1 days', periods=3, freq='1 min 2 s 3 us')\n t2 = -1 * t1\n t1a = timedelta_range('1 days', periods=3, freq='1 min 2 s')\n t1c = pd.TimedeltaIndex([1, 1, 1], unit='D')\n\n # note that negative times round DOWN! so don't give whole numbers\n for (freq, s1, s2) in [('N', t1, t2),\n ('U', t1, t2),\n ('L', t1a,\n TimedeltaIndex(['-1 days +00:00:00',\n '-2 days +23:58:58',\n '-2 days +23:57:56'],\n dtype='timedelta64[ns]',\n freq=None)\n ),\n ('S', t1a,\n TimedeltaIndex(['-1 days +00:00:00',\n '-2 days +23:58:58',\n '-2 days +23:57:56'],\n dtype='timedelta64[ns]',\n freq=None)\n ),\n ('12T', t1c,\n TimedeltaIndex(['-1 days',\n '-1 days',\n '-1 days'],\n dtype='timedelta64[ns]',\n freq=None)\n ),\n ('H', t1c,\n TimedeltaIndex(['-1 days',\n '-1 days',\n '-1 days'],\n dtype='timedelta64[ns]',\n freq=None)\n ),\n ('d', t1c,\n pd.TimedeltaIndex([-1, -1, -1], unit='D')\n )]:\n\n r1 = t1.round(freq)\n tm.assert_index_equal(r1, s1)\n r2 = t2.round(freq)\n tm.assert_index_equal(r2, s2)\n\n # invalid\n for freq in ['Y', 'M', 'foobar']:\n pytest.raises(ValueError, lambda: t1.round(freq))\n\n def test_contains(self):\n # Checking for any NaT-like objects\n # GH 13603\n td = to_timedelta(range(5), unit='d') + pd.offsets.Hour(1)\n for v in [pd.NaT, None, float('nan'), np.nan]:\n assert not (v in td)\n\n td = to_timedelta([pd.NaT])\n for v in [pd.NaT, None, float('nan'), np.nan]:\n assert (v in td)\n\n def test_identity(self):\n\n td = Timedelta(10, unit='d')\n assert isinstance(td, Timedelta)\n assert isinstance(td, timedelta)\n\n def test_short_format_converters(self):\n def conv(v):\n return v.astype('m8[ns]')\n\n assert ct('10') == np.timedelta64(10, 'ns')\n 
assert ct('10ns') == np.timedelta64(10, 'ns')\n assert ct('100') == np.timedelta64(100, 'ns')\n assert ct('100ns') == np.timedelta64(100, 'ns')\n\n assert ct('1000') == np.timedelta64(1000, 'ns')\n assert ct('1000ns') == np.timedelta64(1000, 'ns')\n assert ct('1000NS') == np.timedelta64(1000, 'ns')\n\n assert ct('10us') == np.timedelta64(10000, 'ns')\n assert ct('100us') == np.timedelta64(100000, 'ns')\n assert ct('1000us') == np.timedelta64(1000000, 'ns')\n assert ct('1000Us') == np.timedelta64(1000000, 'ns')\n assert ct('1000uS') == np.timedelta64(1000000, 'ns')\n\n assert ct('1ms') == np.timedelta64(1000000, 'ns')\n assert ct('10ms') == np.timedelta64(10000000, 'ns')\n assert ct('100ms') == np.timedelta64(100000000, 'ns')\n assert ct('1000ms') == np.timedelta64(1000000000, 'ns')\n\n assert ct('-1s') == -np.timedelta64(1000000000, 'ns')\n assert ct('1s') == np.timedelta64(1000000000, 'ns')\n assert ct('10s') == np.timedelta64(10000000000, 'ns')\n assert ct('100s') == np.timedelta64(100000000000, 'ns')\n assert ct('1000s') == np.timedelta64(1000000000000, 'ns')\n\n assert ct('1d') == conv(np.timedelta64(1, 'D'))\n assert ct('-1d') == -conv(np.timedelta64(1, 'D'))\n assert ct('1D') == conv(np.timedelta64(1, 'D'))\n assert ct('10D') == conv(np.timedelta64(10, 'D'))\n assert ct('100D') == conv(np.timedelta64(100, 'D'))\n assert ct('1000D') == conv(np.timedelta64(1000, 'D'))\n assert ct('10000D') == conv(np.timedelta64(10000, 'D'))\n\n # space\n assert ct(' 10000D ') == conv(np.timedelta64(10000, 'D'))\n assert ct(' - 10000D ') == -conv(np.timedelta64(10000, 'D'))\n\n # invalid\n pytest.raises(ValueError, ct, '1foo')\n pytest.raises(ValueError, ct, 'foo')\n\n def test_full_format_converters(self):\n def conv(v):\n return v.astype('m8[ns]')\n\n d1 = np.timedelta64(1, 'D')\n\n assert ct('1days') == conv(d1)\n assert ct('1days,') == conv(d1)\n assert ct('- 1days,') == -conv(d1)\n\n assert ct('00:00:01') == conv(np.timedelta64(1, 's'))\n assert ct('06:00:01') == 
conv(np.timedelta64(6 * 3600 + 1, 's'))\n assert ct('06:00:01.0') == conv(np.timedelta64(6 * 3600 + 1, 's'))\n assert ct('06:00:01.01') == conv(np.timedelta64(\n 1000 * (6 * 3600 + 1) + 10, 'ms'))\n\n assert (ct('- 1days, 00:00:01') ==\n conv(-d1 + np.timedelta64(1, 's')))\n assert (ct('1days, 06:00:01') ==\n conv(d1 + np.timedelta64(6 * 3600 + 1, 's')))\n assert (ct('1days, 06:00:01.01') ==\n conv(d1 + np.timedelta64(1000 * (6 * 3600 + 1) + 10, 'ms')))\n\n # invalid\n pytest.raises(ValueError, ct, '- 1days, 00')\n\n def test_overflow(self):\n # GH 9442\n s = Series(pd.date_range('20130101', periods=100000, freq='H'))\n s[0] += pd.Timedelta('1s 1ms')\n\n # mean\n result = (s - s.min()).mean()\n expected = pd.Timedelta((pd.DatetimeIndex((s - s.min())).asi8 / len(s)\n ).sum())\n\n # the computation is converted to float so\n # might be some loss of precision\n assert np.allclose(result.value / 1000, expected.value / 1000)\n\n # sum\n pytest.raises(ValueError, lambda: (s - s.min()).sum())\n s1 = s[0:10000]\n pytest.raises(ValueError, lambda: (s1 - s1.min()).sum())\n s2 = s[0:1000]\n result = (s2 - s2.min()).sum()\n\n def test_pickle(self):\n\n v = Timedelta('1 days 10:11:12.0123456')\n v_p = tm.round_trip_pickle(v)\n assert v == v_p\n\n def test_timedelta_hash_equality(self):\n # GH 11129\n v = Timedelta(1, 'D')\n td = timedelta(days=1)\n assert hash(v) == hash(td)\n\n d = {td: 2}\n assert d[v] == 2\n\n tds = timedelta_range('1 second', periods=20)\n assert all(hash(td) == hash(td.to_pytimedelta()) for td in tds)\n\n # python timedeltas drop ns resolution\n ns_td = Timedelta(1, 'ns')\n assert hash(ns_td) != hash(ns_td.to_pytimedelta())\n\n def test_implementation_limits(self):\n min_td = Timedelta(Timedelta.min)\n max_td = Timedelta(Timedelta.max)\n\n # GH 12727\n # timedelta limits correspond to int64 boundaries\n assert min_td.value == np.iinfo(np.int64).min + 1\n assert max_td.value == np.iinfo(np.int64).max\n\n # Beyond lower limit, a NAT before the Overflow\n 
assert (min_td - Timedelta(1, 'ns')) is NaT\n\n with pytest.raises(OverflowError):\n min_td - Timedelta(2, 'ns')\n\n with pytest.raises(OverflowError):\n max_td + Timedelta(1, 'ns')\n\n # Same tests using the internal nanosecond values\n td = Timedelta(min_td.value - 1, 'ns')\n assert td is NaT\n\n with pytest.raises(OverflowError):\n Timedelta(min_td.value - 2, 'ns')\n\n with pytest.raises(OverflowError):\n Timedelta(max_td.value + 1, 'ns')\n\n def test_total_seconds_precision(self):\n # GH 19458\n assert Timedelta('30S').total_seconds() == 30.0\n assert Timedelta('0').total_seconds() == 0.0\n assert Timedelta('-2S').total_seconds() == -2.0\n assert Timedelta('5.324S').total_seconds() == 5.324\n assert (Timedelta('30S').total_seconds() - 30.0) < 1e-20\n assert (30.0 - Timedelta('30S').total_seconds()) < 1e-20\n\n def test_timedelta_arithmetic(self):\n data = pd.Series(['nat', '32 days'], dtype='timedelta64[ns]')\n deltas = [timedelta(days=1), Timedelta(1, unit='D')]\n for delta in deltas:\n result_method = data.add(delta)\n result_operator = data + delta\n expected = pd.Series(['nat', '33 days'], dtype='timedelta64[ns]')\n tm.assert_series_equal(result_operator, expected)\n tm.assert_series_equal(result_method, expected)\n\n result_method = data.sub(delta)\n result_operator = data - delta\n expected = pd.Series(['nat', '31 days'], dtype='timedelta64[ns]')\n tm.assert_series_equal(result_operator, expected)\n tm.assert_series_equal(result_method, expected)\n # GH 9396\n result_method = data.div(delta)\n result_operator = data / delta\n expected = pd.Series([np.nan, 32.], dtype='float64')\n tm.assert_series_equal(result_operator, expected)\n tm.assert_series_equal(result_method, expected)\n\n def test_apply_to_timedelta(self):\n timedelta_NaT = pd.to_timedelta('NaT')\n\n list_of_valid_strings = ['00:00:01', '00:00:02']\n a = pd.to_timedelta(list_of_valid_strings)\n b = Series(list_of_valid_strings).apply(pd.to_timedelta)\n # Can't compare until apply on a Series 
gives the correct dtype\n # assert_series_equal(a, b)\n\n list_of_strings = ['00:00:01', np.nan, pd.NaT, timedelta_NaT]\n\n # TODO: unused?\n a = pd.to_timedelta(list_of_strings) # noqa\n b = Series(list_of_strings).apply(pd.to_timedelta) # noqa\n # Can't compare until apply on a Series gives the correct dtype\n # assert_series_equal(a, b)\n\n def test_components(self):\n rng = timedelta_range('1 days, 10:11:12', periods=2, freq='s')\n rng.components\n\n # with nat\n s = Series(rng)\n s[1] = np.nan\n\n result = s.dt.components\n assert not result.iloc[0].isna().all()\n assert result.iloc[1].isna().all()\n\n\[email protected]('value, expected', [\n (Timedelta('10S'), True),\n (Timedelta('-10S'), True),\n (Timedelta(10, unit='ns'), True),\n (Timedelta(0, unit='ns'), False),\n (Timedelta(-10, unit='ns'), True),\n (Timedelta(None), True),\n (pd.NaT, True),\n])\ndef test_truthiness(value, expected):\n # https://github.com/pandas-dev/pandas/issues/21484\n assert bool(value) is expected\n"
] |
[
[
"pandas.Series",
"pandas.util.testing.assert_produces_warning",
"pandas.util.testing.assert_index_equal",
"numpy.iinfo",
"pandas.util.testing.round_trip_pickle",
"pandas.util.testing.assert_numpy_array_equal",
"numpy.allclose",
"pandas.Timestamp",
"numpy.arange",
"pandas.util.testing.assert_series_equal",
"pandas.core.tools.timedeltas._coerce_scalar_to_timedelta_type",
"pandas.Timedelta",
"numpy.timedelta64",
"pandas.date_range",
"numpy.array",
"pandas.timedelta_range",
"pandas.TimedeltaIndex",
"pandas.to_timedelta",
"pandas.offsets.Hour"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rsiverd/kwhere
|
[
"5acd561b1e58d254d80a94f8a5e1a6af6bf17547"
] |
[
"angle.py"
] |
[
"#!/usr/bin/env python\n# vim: set fileencoding=utf-8 :\n#\n# Some useful functions for angle manipulation.\n#\n# Rob Siverd\n# Created: 2011-04-25\n# Last modified: 2018-07-19\n#--------------------------------------------------------------------------\n#**************************************************************************\n#--------------------------------------------------------------------------\n\n## Current version:\n__version__ = \"1.7.7\"\n\n## Required modules:\nimport numpy as np\nimport sys\n\n##--------------------------------------------------------------------------##\n## Angle manipulation class:\n#class angle:\n\n ### Initialization:\n #def __init__(self):\n # pass\n\n ##-----------------------------------------------------------------------##\n ## Angle reduction functions:\n \n# Reduce angle (degrees) to 0 < ang < 360:\ndef SmallDeg(dAngle):\n \"\"\"\n Reduce input angle (degrees) to 0 < angle < 360.\n \"\"\"\n return (360.0 * ((dAngle / 360.0) - np.floor(dAngle / 360.0)))\n\n# Reduce angle (radians) to 0 < ang < 2pi:\ndef SmallRad(rAngle):\n \"\"\"\n Reduce input angle (radians) to 0 < angle < 2pi.\n \"\"\"\n twopi = 2.0 * np.pi\n return (twopi * ((rAngle / twopi) - np.floor(rAngle / twopi)))\n\n# Reduce angle (hours) to 0 < ang < 24:\ndef SmallHour(hAngle):\n \"\"\"\n Reduce input angle (hours) to 0 < angle < 24.\n \"\"\"\n return (24.0 * ((hAngle / 24.0) - np.floor(hAngle / 24.0)))\n\n##-----------------------------------------------------------------------##\n## Dimensions-checker:\n#def _calc_result_dims(ra1, de1, ra2, de2):\ndef _coord_dims_okay(ra1, de1, ra2, de2):\n tra1, tde1 = np.atleast_1d(ra1), np.atleast_1d(de1)\n tra2, tde2 = np.atleast_1d(ra2), np.atleast_1d(de2)\n if (tra1.size != tde1.size):\n sys.stderr.write(\"Input dimension mismatch: ra1, de1\\n\")\n return False\n #return None\n if (tra2.size != tde2.size):\n sys.stderr.write(\"Input dimension mismatch: ra2, de2\\n\")\n return False\n #return None\n if (tra1.size != 
tra2.size) and (tra1.size > 1) and (tra2.size > 1):\n sys.stderr.write(\"Incompatible dimensions detected!\\n\")\n sys.stderr.write(\"ra1.size == de1.size == %s\\n\" % tra1.size)\n sys.stderr.write(\"ra2.size == de2.size == %s\\n\" % tra2.size)\n return False\n #return None\n #return max(tra1.size, tra2.size)\n return True\n\n## Compute angular separation (radians):\ndef rAngSep(ra1r, dec1r, ra2r, dec2r): #, safe=True):\n \"\"\"\n Compute angular separation(s) with a dot product. All input/output is\n in radians. Inputs are converted to Cartesian coordinates and their\n dot product is computed. The arccosine of the dot product is the\n angular separation (since A dot B = |A||B| * cos(angular_separation).\n \"\"\"\n\n # Figure out dimensions:\n if not _coord_dims_okay(ra1r, dec1r, ra2r, dec2r):\n return None\n #result_size = _calc_result_dims(ra1r, dec1r, ra2r, dec2r)\n #if result_size == None:\n # return None\n\n # Angular differences:\n equal = (ra1r == ra2r) & (dec1r == dec2r)\n #sys.stderr.write(\"equal: %s\\n\" % str(equal))\n #sys.stderr.write(\"equal.dtype: %s\\n\" % equal.dtype)\n #sys.stderr.write(\"which: %s\\n\" % str(which))\n angsep = np.zeros_like(equal, dtype='float')\n #x1 = np.cos(dec1r) * np.cos(ra1r)\n #y1 = np.cos(dec1r) * np.sin(ra1r)\n #z1 = np.sin(dec1r)\n #x2 = np.cos(dec2r) * np.cos(ra2r)\n #y2 = np.cos(dec2r) * np.sin(ra2r)\n #z2 = np.sin(dec2r)\n #dot = x1*x2 + y1*y2 + z1*z2\n dot = np.sin(dec1r) * np.sin(dec2r) \\\n + np.cos(dec1r) * np.cos(dec2r) * np.cos(ra1r - ra2r)\n #sys.stderr.write(\"dot: %s\\n\" % str(dot))\n #oob = (dot < -1) | (1 < dot)\n #sys.stderr.write(\"oob: %s\\n\" % str(oob))\n #angsep[~oob] = np.arccos(dot[~oob])\n #sys.stderr.write(\"angsep[~equal]: %s\\n\" % str(angsep[~equal]))\n #sys.stderr.write(\"dot[~equal]: %s\\n\" % str(dot[~equal]))\n #angsep[~equal] = np.arccos(dot[~equal])\n #angsep[~equal] = np.arccos(dot[~equal])\n angsep[~equal] = np.arccos(dot[~equal])\n return angsep\n #return np.arccos(dot)\n # 
ALTERNATIVE:\n # dot = np.sin(dec1r) * np.sin(dec2r) +\n # np.cos(dec1r) * np.cos(dec2r) * np.cos(ra1r - ra2r)\n\n## Angular separation in degrees (a wrapper for the above):\ndef dAngSep(ra1d, dec1d, ra2d, dec2d): #, safe=True):\n \"\"\"\n Compute angular separation(s) using a dot product. This is a wrapper\n for the rAngSep() function. See its docstring for more info.\n \"\"\"\n ra1r, dec1r = np.radians(ra1d), np.radians(dec1d)\n ra2r, dec2r = np.radians(ra2d), np.radians(dec2d)\n return np.degrees(rAngSep(ra1r, dec1r, ra2r, dec2r)) #, safe=safe))\n\n##-----------------------------------------------------------------------##\n## Convert Azm/Alt/Lat to HA/Dec (radians, azm reckoned E from N):\ndef rAzmAltLat_2_HADec(rAzm, rAlt, rLat):\n \"\"\"\n Convert (azimuth, altitude, latitude) --> (hour angle, declination).\n Inputs must be given in RADIANS.\n Output will be given in RADIANS.\n \"\"\"\n sys.stderr.write(\"WARNING: this needs N/S sanity check!\\n\")\n # Calculate HA:\n numer = np.sin(rAzm)\n denom = np.cos(rAzm)*np.sin(rLat) + np.tan(rAlt)*np.cos(rLat)\n rHA = np.arctan2(numer, denom)\n # Calculate Dec:\n dummy = np.sin(rLat)*np.sin(rAlt) \\\n - np.cos(rLat)*np.cos(rAlt)*np.cos(rAzm)\n rDec = np.arcsin(dummy)\n return (rHA, rDec)\n\n## Convert Azm/Alt/Lat to HA/Dec (degrees, azm reckoned E from N):\ndef dAzmAltLat_2_HADec(dAzm, dAlt, dLat):\n \"\"\"\n Convert (azimuth, altitude, latitude) --> (hour angle, declination).\n Inputs must be given in DEGREES.\n Output will be given in DEGREES.\n \"\"\"\n #rAzm, rAlt, rLat = np.radians([dAzm, dAlt, dLat])\n rAzm, rAlt, rLat = np.radians(dAzm), np.radians(dAlt), np.radians(dLat)\n #print rAzm,rAlt,rLat\n rHA, rDec = rAzmAltLat_2_HADec(rAzm, rAlt, rLat)\n return (np.degrees(rHA), np.degrees(rDec))\n #return np.degrees(rAzmAltLat_2_HADec(rAzm, rAlt, 
rLat))\n\n##-----------------------------------------------------------------------##\n##-----------------------------------------------------------------------##\n\n##-----------------------------------------------------------------------##\n## Convert HA/Dec/Lat to Azm/Alt (radians, Azm *WEST from SOUTH*):\ndef rHADecLat_2_sAzAlt(rHA, rDec, rLat):\n \"\"\"\n Converts (hour angle, declination, latitude) --> (azimuth, altitude).\n -- Azimuth is reckoned *WEST from SOUTH*.\n -- Latitude is measured positively northwards.\n -- Hour Angle is measured positively westwards.\n -- Inputs must be given in RADIANS.\n -- Output will be given in RADIANS.\n \"\"\"\n # Calculate Azm:\n numer = np.sin(rHA)\n denom = np.cos(rHA)*np.sin(rLat) - np.tan(rDec)*np.cos(rLat)\n rAzm = np.arctan2(numer, denom)\n # Calculate Alt:\n dummy = np.sin(rLat)*np.sin(rDec) \\\n + np.cos(rLat)*np.cos(rDec)*np.cos(rHA)\n rAlt = np.arcsin(dummy)\n return (rAzm, rAlt)\n\n##-----------------------------------------------------------------------##\n## Convert HA/Dec/Lat to Azm/Alt (degrees, Azm WEST from SOUTH):\ndef dHADecLat_2_sAzAlt(dHA, dDec, dLat):\n \"\"\"\n Converts (hour angle, declination, latitude) --> (azimuth, altitude).\n -- Azimuth is reckoned *WEST from SOUTH*.\n -- Latitude is measured positively northwards.\n -- Hour Angle is measured positively westwards.\n -- Inputs must be given in DEGREES.\n -- Output will be given in DEGREES.\n \"\"\"\n rHA, rDec, rLat = np.radians(dHA), np.radians(dDec), np.radians(dLat)\n rAz, rAlt = rHADecLat_2_sAzAlt(rHA, rDec, rLat)\n return (np.degrees(rAz), np.degrees(rAlt))\n\n##-----------------------------------------------------------------------##\n## Convert HA/Dec/Lat to Azm/Alt (radians, Azm EAST from NORTH):\ndef rHADecLat_2_nAzAlt(rHA, rDec, rLat):\n \"\"\"\n Converts (hour angle, declination, latitude) --> (azimuth, altitude).\n -- Azimuth is reckoned *EAST from NORTH*.\n -- Latitude is measured positively northwards.\n -- Hour Angle is measured 
positively westwards.\n -- Inputs must be given in RADIANS.\n -- Output will be given in RADIANS.\n \"\"\"\n rAz, rAlt = rHADecLat_2_sAzAlt(rHA, rDec, rLat)\n rAz = (rAz + np.pi) % np.radians(360.0)\n return (rAz, rAlt)\n\n##-----------------------------------------------------------------------##\n## Convert HA/Dec/Lat to Azm/Alt (degrees, Azm EAST from NORTH):\ndef dHADecLat_2_nAzAlt(dHA, dDec, dLat):\n \"\"\"\n Converts (hour angle, declination, latitude) --> (azimuth, altitude).\n -- Azimuth is reckoned *EAST from NORTH*.\n -- Latitude is measured positively northwards.\n -- Hour Angle is measured positively westwards.\n -- Inputs must be given in DEGREES.\n -- Output will be given in DEGREES.\n \"\"\"\n # Unit conversion:\n rHA, rDec, rLat = np.radians(dHA), np.radians(dDec), np.radians(dLat)\n rAz, rAlt = rHADecLat_2_nAzAlt(rHA, rDec, rLat)\n return (np.degrees(rAz), np.degrees(rAlt))\n\n##--------------------------------------------------------------------------##\n##********************* Spherical Location Estimates: *********************##\n##--------------------------------------------------------------------------##\n\n## Average direction of vectors on unit sphere (RADIANS):\ndef spheremean_rad(RA_rad, DE_rad, dev=False):\n \"\"\"\n Compute mean (RA, Dec) for a set of RA, Dec directions (RADIANS).\n Returns:\n (avg_RA, avg_DE) # dev=False\n (avg_RA, avg_DE, angvar) # dev=True\n \"\"\"\n vecX = np.sum(np.cos(DE_rad) * np.cos(RA_rad)) # total X length\n vecY = np.sum(np.cos(DE_rad) * np.sin(RA_rad)) # total Y length\n vecZ = np.sum(np.sin(DE_rad)) # total Z length\n R_tot = np.sqrt(vecX*vecX + vecY*vecY + vecZ*vecZ) # total distance\n angvar = 1.0 - R_tot # 'circular' variance\n avg_DE = np.arcsin(vecZ / R_tot)\n avg_RA = np.arctan2(vecY, vecX) % (2.0 * np.pi)\n if dev:\n return (avg_RA, avg_DE, angvar)\n else:\n return (avg_RA, avg_DE)\n\n## Average direction of vectors on unit sphere (RADIANS):\ndef spheremean_deg(RA_deg, DE_deg, dev=False):\n \"\"\"\n 
Compute spherical mean and angular variance for a set of RA, Dec \n coordinates (DEGREES).\n \n Returns:\n (avg_RA, avg_DE) # dev=False\n (avg_RA, avg_DE, angvar) # dev=True\n \"\"\"\n RA_rad = np.radians(RA_deg)\n DE_rad = np.radians(DE_deg)\n avg_RA_r, avg_DE_r, angvar = spheremean_rad(RA_rad, DE_rad, dev=True)\n if dev:\n return (np.degrees(avg_RA_r), np.degrees(avg_DE_r), angvar)\n else:\n return (np.degrees(avg_RA_r), np.degrees(avg_DE_r))\n\n## Medoid direction and scatter of vectors on unit sphere (RADIANS):\ndef sphere_medoid_rad(RA_rad, DE_rad, dev=False):\n \"\"\"\n Find medoid of a set of RA, Dec coordinates (RADIANS). Optionally returns\n sum of absolute differences of input coordinates from medoid.\n\n Returns:\n (med_RA, med_DE) # dev=False\n (med_RA, med_DE, sum_abs_diffs) # dev=True\n \"\"\"\n sad = np.zeros_like(RA_rad) # sum of absolute diffs\n for i, (try_RA, try_DE) in enumerate(zip(RA_rad, DE_rad)):\n sad[i] = np.sum(rAngSep(RA_rad, DE_rad, try_RA, try_DE))\n mid = sad.argmin()\n if dev:\n return (RA_rad[mid], DE_rad[mid], sad[mid])\n else:\n return (RA_rad[mid], DE_rad[mid])\n\n## Medoid direction and scatter of vectors on unit sphere (DEGREES):\ndef sphere_medoid_deg(RA_deg, DE_deg, dev=False):\n \"\"\"\n Find medoid of a set of RA, Dec coordinates (DEGREES). 
Optionally returns\n sum of absolute differences of input coordinates from medoid.\n\n Returns:\n (med_RA, med_DE) # dev=False\n (med_RA, med_DE, sum_abs_diffs) # dev=True\n \"\"\"\n RA_rad = np.radians(RA_deg)\n DE_rad = np.radians(DE_deg)\n med_RA_r, med_DE_r, sad = sphere_medoid_rad(RA_rad, DE_rad, dev=True)\n if dev:\n return (np.degrees(med_RA_r), np.degrees(med_DE_r), sad)\n else:\n return (np.degrees(med_RA_r), np.degrees(med_DE_r))\n\n\n\n######################################################################\n# CHANGELOG (angle.py):\n#---------------------------------------------------------------------\n#\n# 2018-07-19:\n# -- Increased __version__ to 1.7.7.\n# -- Degree form of AzAltLat->HADec now returns a tuple like its radian\n# counterpart.\n# -- Simplified radian conversion in AzAltLat->HADec converter.\n#\n# 2018-02-12:\n# -- Increased __version__ to 1.7.6.\n# -- Fixed check for bad/inconsistent dimensions used in AngSep routines.\n# Previously, the (allowed) case of all dimensions equal was not\n# counted as valid. Now it should work.\n#\n# 2018-02-03:\n# -- Increased __version__ to 1.7.5.\n# -- Results of rAngSep are now initialized to size of 'equal' array with\n# explicit float type. This guarantees the proper type/dimensions.\n# -- Added dimensionality check routine to detect bogus rAngSep inputs.\n#\n# 2018-01-25:\n# -- Increased __version__ to 1.7.0.\n# -- rAngSep and dAngSep now explicitly handle cases of equal inputs to\n# ensure 0-valued results and avoid NaNs in response.\n# -- Added sphere_medoid_deg() method.\n# -- Indentation is now 4 spaces.\n#\n# 2017-12-13:\n# -- Increased __version__ to 1.6.5.\n# -- Now convert each input to radians separately in dAngSep(). 
This\n# ensures that dimensionality and axes are preserved (the old\n# syntax did not seem to do this with multi-dimensional arrays).\n#\n# 2016-11-08:\n# -- Increased __version__ to 1.6.0.\n# -- Fixed array support in dHADecLat_2_nAzAlt().\n#\n# 2015-01-02:\n# -- Increased __version__ to 1.5.0.\n# -- Added sphere_medoid_rad() \n# -- Added spheremean_rad() and spheremean_deg() directional averages.\n#\n# 2014-04-28:\n# -- Increased __version__ to 1.0.1.\n# -- Now explicitly specify UTF-8 file encoding.\n#\n# 2012-09-26:\n# -- Added docstrings to several functions.\n#\n# 2012-06-27:\n# -- Added azm/alt/lat --> HA/dec conversion functions.\n# -- Added HA/dec/lat --> azm/alt conversion functions.\n# -- Improved docstring formatting.\n#\n# 2011-04-25:\n# -- Added SmallDeg, SmallRad, & SmallHour functions.\n# -- First created angle.py.\n#\n"
] |
[
[
"numpy.radians",
"numpy.sqrt",
"numpy.arcsin",
"numpy.degrees",
"numpy.arccos",
"numpy.cos",
"numpy.sin",
"numpy.arctan2",
"numpy.atleast_1d",
"numpy.tan",
"numpy.zeros_like",
"numpy.floor"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
zhangmozhe/torch3d
|
[
"d47e9b243e520f9c0c72a26c271d2c7ad242cb65",
"d47e9b243e520f9c0c72a26c271d2c7ad242cb65"
] |
[
"torch3d/nn/deconv.py",
"torch3d/models/segmentation/pointnet.py"
] |
[
"import torch\nimport torch.nn as nn\nfrom torch3d.nn import functional as F\nfrom torch3d.nn.utils import _single\n\n\nclass FeaturePropagation(nn.Sequential):\n \"\"\"\n The feature propagation from the `\"PointNet++: Deep Hierarchical Feature Learning on Point Sets in a Metric Space\" <https://arxiv.org/abs/1706.02413>`_ paper.\n\n Args:\n in_channels (int): Number of channels in the input point set\n out_channels (int): Number of channels produced by the convolution\n kernel_size (int, optional): Neighborhood size of the convolution kernel. Default: 1\n bias (bool, optional): If True, adds a learnable bias to the output. Default: ``True``\n \"\"\" # noqa\n\n def __init__(self, in_channels, out_channels, kernel_size=1, bias=True):\n self.kernel_size = kernel_size\n in_channels = in_channels\n out_channels = _single(out_channels)\n modules = []\n for channels in out_channels:\n modules.append(nn.Conv1d(in_channels, channels, 1, bias=bias))\n modules.append(nn.BatchNorm1d(channels))\n modules.append(nn.ReLU(True))\n in_channels = channels\n super(FeaturePropagation, self).__init__(*modules)\n\n def forward(self, x, y):\n p, x = x[:, :3], x[:, 3:]\n q, y = y[:, :3], y[:, 3:]\n x = F.interpolate(p, q, x, self.kernel_size)\n x = torch.cat([x, y], dim=1)\n x = super(FeaturePropagation, self).forward(x)\n x = torch.cat([q, x], dim=1)\n return x\n\n\nclass PointDeconv(nn.Module):\n \"\"\"\n The point deconvolution layer from the `\"PointConv: Deep Convolutional Networks on 3D Point Clouds\" <https://arxiv.org/abs/1811.07246>`_ paper.\n\n Args:\n in_channels (int): Number of channels in the input point set\n out_channels (int): Number of channels produced by the convolution\n kernel_size (int, optional): Neighborhood size of the convolution kernel. Default: 1\n bandwidth (float, optional): Bandwidth of kernel density estimation. Default: 1.0\n bias (bool, optional): If True, adds a learnable bias to the output. 
Default: ``True``\n \"\"\" # noqa\n\n def __init__(\n self, in_channels, out_channels, kernel_size=1, bandwidth=1.0, bias=True\n ):\n super(PointDeconv, self).__init__()\n self.kernel_size = kernel_size\n self.bandwidth = bandwidth\n self.scale = nn.Sequential(\n nn.Conv1d(1, 8, 1, bias=bias),\n nn.BatchNorm1d(8),\n nn.ReLU(True),\n nn.Conv1d(8, 8, 1, bias=bias),\n nn.BatchNorm1d(8),\n nn.ReLU(True),\n nn.Conv1d(8, 1, 1, bias=bias),\n nn.Sigmoid(),\n )\n self.weight = nn.Sequential(\n nn.Conv2d(3, 8, 1, bias=bias),\n nn.BatchNorm2d(8),\n nn.ReLU(True),\n nn.Conv2d(8, 8, 1, bias=bias),\n nn.BatchNorm2d(8),\n nn.ReLU(True),\n nn.Conv2d(8, 16, 1, bias=bias),\n )\n in_channels = in_channels\n out_channels = _single(out_channels)\n modules = []\n for channels in out_channels[:-1]:\n modules.append(nn.Conv2d(in_channels, channels, 1, bias=bias))\n modules.append(nn.BatchNorm2d(channels))\n modules.append(nn.ReLU(True))\n in_channels = channels\n self.mlp = nn.Sequential(*modules)\n self.lin = nn.Sequential(\n nn.Conv2d(in_channels, out_channels[-1], [16, 1], bias=bias),\n nn.BatchNorm2d(out_channels[-1]),\n nn.ReLU(True),\n )\n\n def forward(self, x, y):\n batch_size = x.shape[0]\n p, x = x[:, :3], x[:, 3:]\n q, y = y[:, :3], y[:, 3:]\n x = F.interpolate(p, q, x, self.kernel_size)\n x = torch.cat([x, y], dim=1)\n in_channels = x.shape[1]\n s = F.kernel_density(q, self.bandwidth).unsqueeze(1)\n s = self.scale(torch.reciprocal(s)) # calculate scaling factor\n _, index = F.knn(q, q, self.kernel_size)\n index = index.view(batch_size, -1).unsqueeze(1)\n # Point and density grouping\n p = torch.gather(q, 2, index.expand(-1, 3, -1))\n x = torch.gather(x, 2, index.expand(-1, in_channels, -1))\n s = torch.gather(s, 2, index)\n p = p.view(batch_size, 3, self.kernel_size, -1)\n x = x.view(batch_size, in_channels, self.kernel_size, -1)\n s = s.view(batch_size, 1, self.kernel_size, -1)\n p = p - q.unsqueeze(2).expand(-1, -1, self.kernel_size, -1)\n w = self.weight(p)\n x = self.mlp(x 
* s)\n x = torch.matmul(w.permute(0, 3, 1, 2), x.permute(0, 3, 2, 1))\n x = x.permute(0, 3, 2, 1)\n x = self.lin(x).squeeze(2)\n x = torch.cat([q, x], dim=1)\n return x\n",
"import torch\nimport torch.nn as nn\n\n\nclass PointNet(nn.Module):\n def __init__(self, in_channels, num_classes, dropout=0.5):\n super(PointNet, self).__init__()\n self.mlp1 = nn.Sequential(\n nn.Conv1d(in_channels, 64, 1, bias=False),\n nn.BatchNorm1d(64),\n nn.Conv1d(64, 64, 1, bias=False),\n nn.BatchNorm1d(64),\n nn.ReLU(True),\n )\n self.mlp2 = nn.Sequential(\n nn.Conv1d(64, 128, 1, bias=False),\n nn.BatchNorm1d(128),\n nn.ReLU(True),\n nn.Conv1d(128, 1024, 1, bias=False),\n nn.BatchNorm1d(1024),\n nn.ReLU(True),\n )\n self.maxpool = nn.AdaptiveMaxPool1d(1)\n self.mlp3 = nn.Sequential(\n nn.Conv1d(1088, 512, 1, bias=False),\n nn.BatchNorm1d(512),\n nn.ReLU(True),\n nn.Conv1d(512, 256, 1, bias=False),\n nn.BatchNorm1d(256),\n nn.ReLU(True),\n nn.Dropout(dropout),\n nn.Conv1d(256, 128, 1, bias=False),\n nn.BatchNorm1d(128),\n nn.ReLU(True),\n nn.Dropout(dropout),\n )\n self.fc = nn.Conv1d(128, num_classes, 1)\n\n def forward(self, x):\n num_points = x.shape[2]\n f = self.mlp1(x)\n x = self.mlp2(f)\n x = self.maxpool(x).repeat(1, 1, num_points)\n x = torch.cat([f, x], dim=1)\n x = self.mlp3(x)\n x = self.fc(x)\n return x\n"
] |
[
[
"torch.nn.Sequential",
"torch.nn.BatchNorm1d",
"torch.cat",
"torch.nn.Conv2d",
"torch.nn.Sigmoid",
"torch.reciprocal",
"torch.nn.BatchNorm2d",
"torch.nn.Conv1d",
"torch.gather",
"torch.nn.ReLU"
],
[
"torch.nn.BatchNorm1d",
"torch.nn.AdaptiveMaxPool1d",
"torch.nn.Dropout",
"torch.cat",
"torch.nn.Conv1d",
"torch.nn.ReLU"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ngocjr7/stavia
|
[
"39c6c311f3888adb6d08b11190e651ed3b4ef01b",
"39c6c311f3888adb6d08b11190e651ed3b4ef01b"
] |
[
"stavia/logistic_regression.py",
"stavia/feature_extraction.py"
] |
[
"from __future__ import absolute_import\n\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.externals import joblib\nfrom sklearn.model_selection import train_test_split\n\nfrom .utils.parameters import *\nfrom .data_processing import load_data\nfrom .init_es import init_es\nfrom .fuzzy_matching.candidate_graph import CandidateGraph\nfrom .crf import tagger\nfrom .utils.utils import contains_Vietchar, no_accent_vietnamese\nfrom nltk import ngrams\n\nimport numpy as np\nimport pickle, copy\nimport sys\nimport re\nimport numpy as np\n\nmodel = None\n\n\ndef levenshtein_ratio_and_distance(s, t, ratio_calc = True):\n\t\"\"\" levenshtein_ratio_and_distance:\n\t\tCalculates levenshtein distance between two strings.\n\t\tIf ratio_calc = True, the function computes the\n\t\tlevenshtein distance ratio of similarity between two strings\n\t\tFor all i and j, distance[i,j] will contain the Levenshtein\n\t\tdistance between the first i characters of s and the\n\t\tfirst j characters of t\n\t\"\"\"\n\trows = len(s)+1\n\tcols = len(t)+1\n\tdistance = np.zeros((rows,cols),dtype = int)\n\n\tfor i in range(1, rows):\n\t\tfor k in range(1,cols):\n\t\t\tdistance[i][0] = i\n\t\t\tdistance[0][k] = k\n \n\tfor col in range(1, cols):\n\t\tfor row in range(1, rows):\n\t\t\tif s[row-1] == t[col-1]:\n\t\t\t\tcost = 0 \n\t\t\telse:\n\t\t\t\tif ratio_calc == True:\n\t\t\t\t\tcost = 2\n\t\t\t\telse:\n\t\t\t\t\tcost = 1\n\t\t\tdistance[row][col] = min(distance[row-1][col] + 1, # Cost of deletions\n\t\t\t\t\t\t\t\t distance[row][col-1] + 1, # Cost of insertions\n\t\t\t\t\t\t\t\t distance[row-1][col-1] + cost) # Cost of substitutions\n\tif ratio_calc == True:\n\t\tRatio = ((len(s)+len(t)) - distance[row][col]) / float(len(s)+len(t))\n\t\treturn Ratio\n\telse:\n\t\treturn distance[row][col]\n\ndef get_ngrams(text, n):\n\tn_grams = ngrams(text, n)\n\treturn [''.join(grams) for grams in n_grams]\n\n\n\ndef jaccard_similarity(string1, string2):\n\tsum = 0\n\tn_gram = 3\n\tlist1 = 
get_ngrams(re.sub(r'[^\\w\\s]', '', string1.lower()).strip(), n_gram);\n\tlist2 = get_ngrams(re.sub(r'[^\\w\\s]', '', string2.lower()).strip(), n_gram);\n\tintersection = len(list(set(list1).intersection(list2)))\n\tunion = (len(list1) + len(list2)) - intersection\n\tif union == 0:\n\t\treturn float(0)\n\tsum += float(intersection / union)\n\treturn float(sum)\n\ndef c_score(string1, string2):\n\tlist2 = string2.split(\", \")\n\tc = 0\n\tfor i in list2:\n\t\tif i in string1:\n\t\t\tc += len(i.split(\" \"))\n\treturn 0\n\ndef extract_features(raw_add, entities, candidate):\n\tfeatures = []\n\t#Bias\n\tfeatures.append(1)\n\n\t#Admin_level in crf\n\tfor field in FIELDS:\n\t\tif field in entities:\n\t\t\tfeatures.append(1)\n\t\telse:\n\t\t\tfeatures.append(0)\n\n\t#Admin_level in candidate\n\tfor field in FIELDS:\n\t\tif field in candidate.keys():\n\t\t\tfeatures.append(1)\n\t\telse:\n\t\t\tfeatures.append(0)\n\n\t#Is contain vietnamese character\n\tfeatures.append(1 if contains_Vietchar(raw_add) == True else 0)\n\n\t#Elastic Score\n\tfor field in FIELDS:\n\t\tif field + '_score' in candidate.keys():\n\t\t\tfeatures.append(float(candidate[field+'_score']))\n\t\telse:\n\t\t\tfeatures.append(0.0)\n\n\t#Entity Score\n\tfor field in FIELDS:\n\t\tif field not in entities or field not in candidate.keys():\n\t\t\tfeatures.append(0.0)\n\t\telse:\n\t\t\tvalue = 0\n\t\t\tfor entity in entities[field]:\n\t\t\t\tvalue = max(value, 1 if entity.lower() == candidate[field].lower() else 0)\n\t\t\tfeatures.append(value)\n\n\t#Entity Score with no_accent_vietnamese\n\tfor field in FIELDS:\n\t\tif field not in entities or field not in candidate.keys():\n\t\t\tfeatures.append(0.0)\n\t\telse:\n\t\t\tvalue = 0\n\t\t\tfor entity in entities[field]:\n\t\t\t\tvalue = max(value, 1 if no_accent_vietnamese(entity.lower()) == no_accent_vietnamese(candidate[field].lower()) else 0)\n\t\t\tfeatures.append(value)\n\n\t#Jaccard Score\n\tfor field in FIELDS:\n\t\tif field not in entities or field not in 
candidate.keys():\n\t\t\tfeatures.append(0.0)\n\t\telse:\n\t\t\tvalue = 0\n\t\t\tfor entity in entities[field]:\n\t\t\t\tvalue = max(value,jaccard_similarity(entity, candidate[field]))\n\t\t\tfeatures.append(value)\n\n\t#Jaccard Score with no_accent_vietnamese\n\tfor field in FIELDS:\n\t\tif field not in entities or field not in candidate.keys():\n\t\t\tfeatures.append(0.0)\n\t\telse:\n\t\t\tvalue = 0\n\t\t\tfor entity in entities[field]:\n\t\t\t\tvalue = max(value, jaccard_similarity(no_accent_vietnamese(entity.lower()), no_accent_vietnamese(candidate[field].lower())))\n\t\t\tfeatures.append(value)\n\n\t#Levenshtein Score\n\tfor field in FIELDS:\n\t\tif field not in entities or field not in candidate.keys():\n\t\t\tfeatures.append(0.0)\n\t\telse:\n\t\t\tvalue = 0\n\t\t\tfor entity in entities[field]:\n\t\t\t\tvalue = max(value, levenshtein_ratio_and_distance(entity.lower(), candidate[field].lower()))\n\t\t\tfeatures.append(value)\n\n\t#Levenshtein Score with no_accent_vietnamese\n\tfor field in FIELDS:\n\t\tif field not in entities or field not in candidate.keys():\n\t\t\tfeatures.append(0.0)\n\t\telse:\n\t\t\tvalue = 0\n\t\t\tfor entity in entities[field]:\n\t\t\t\tvalue = max(value, levenshtein_ratio_and_distance(no_accent_vietnamese(entity.lower()), no_accent_vietnamese(candidate[field].lower())))\n\t\t\tfeatures.append(value)\n\n\treturn features\n\n\ndef lr_detect_entity(inp, tokens=None, labels=None):\n\tif tokens == None or labels == None:\n\t\ttokens, labels = tagger.tag(inp)\n\tentities = {}\n\n\tn = len(tokens)\n\tbuff = ''\n\tlbuff = ''\n\tisEntity = False\n\n\tfor i in range(n):\n\t\tif (labels[i][0] == 'I'):\n\t\t\tbuff += ' ' + tokens[i]\n\t\telse:\n\t\t\tif isEntity == True:\n\t\t\t\tkey = lbuff.lower() \n\t\t\t\tif key not in entities:\n\t\t\t\t\tentities[key] = [buff]\n\t\t\t\telse:\n\t\t\t\t\tentities[key].append(buff)\n\n\t\t\tbuff = tokens[i]\n\t\t\tif labels[i][0] == 'B':\n\t\t\t\tif labels[i] == 'B_DIST':\n\t\t\t\t\tlbuff = 
'DISTRICT'\n\t\t\t\telif labels[i] == 'B_PRO':\n\t\t\t\t\tlbuff = 'NAME'\n\t\t\t\telse:\n\t\t\t\t\tlbuff = labels[i][2:]\n\t\t\t\tisEntity = True\n\t\t\telse:\n\t\t\t\tlbuff = labels[i]\n\t\t\t\tisEntity = False\n\n\tif isEntity == True:\n\t\tkey = lbuff.lower() \n\t\tif key not in entities :\n\t\t\tentities[key] = [buff]\n\t\telse:\n\t\t\tentities[key].append(buff)\n\n\n\treturn entities\n\ndef lr_judge(raw_add, entities, candidates):\n\tglobal model\n\tX = []\n\tfor candidate in candidates:\n\t\tX.append(extract_features(raw_add, entities, candidate))\n\n\tif len(X) == 0:\n\t\treturn []\n\t\t\n\tif model == None:\n\t\tmodel = pickle.load(open(MODEL_FINAL_FILE, 'rb'))\n\t\n\ty_preds = model.predict_proba(X)\n\n\tret = []\n\tfor i in range(len(candidates)):\n\t\tcandidate = copy.deepcopy(candidates[i])\n\t\tcandidate['score'] = y_preds[i][1]\n\t\tret.append(candidate)\n\treturn ret\n\ndef train():\n\traw_data = load_data(TRAIN_FINAL_FILE)\n\tprint('number of sample =', len(raw_data))\n\tsys.stdout.flush()\n\n\tX_data = []\n\tY_data = []\n\n\tprint('Extracing Feature -----> ')\n\tsys.stdout.flush()\n\tinit_es()\n\tnumber_positive_sample = 0\n\tfor raw_add, std_add in raw_data:\n\t\tgraph = CandidateGraph.build_graph(raw_add)\n\t\tgraph.prune_by_beam_search(k=BEAM_SIZE)\n\t\tcandidates = graph.extract_address()\n\t\tcrf_entities = lr_detect_entity(raw_add)\n\n\t\tfor candidate in candidates:\n\t\t\tX_data.append(extract_features(raw_add, crf_entities, candidate))\n\t\t\tY_data.append(1 if str(candidate['addr_id']) in std_add else 0)\n\t\t\tnumber_positive_sample += Y_data[-1]\n\n\tprint('Number Positive sample = ', number_positive_sample)\n\tprint('Number Sample = ', len(Y_data))\n\tprint('Spliting data')\n\tsys.stdout.flush()\n\n\tX_train, X_dev, Y_train, Y_dev = train_test_split(X_data, Y_data, test_size=0.13, random_state=42)\n\tprint('length of X_train', len(X_train))\n\tlambs = [0.000001, 0.00001, 0.0001, 0.0003, 0.0006, 0.0001, 0.001, 0.003, 0.006, 0.01, 0.03, 
1, 1e20]\n\tmax_acc = 0\n\tbest_lamb = 0.00001\n\n\tfor lamb in lambs:\n\t\tprint('Hyperparameters Tuning ------------------>>>')\n\t\tprint('Lambda = ', lamb)\n\t\tsys.stdout.flush()\n\t\tmodel = LogisticRegression(C=lamb,verbose=0, fit_intercept=True, max_iter=1000,class_weight='balanced')\n\t\tmodel.fit(X_train, Y_train)\n\n\t\tprint('training score',model.score(X_train, Y_train))\n\n\t\tpreds = model.predict(X_dev)\n\t\tacc = (Y_dev == preds).mean()\n\t\tprint('validation accuracy = ', acc)\n\t\tsys.stdout.flush()\n\n\t\tif acc > max_acc:\n\t\t\tmax_acc = acc\n\t\t\tbest_lamb = lamb\n\n\tprint(\"++++++++++++++ FINAL ROUND ++++++++++++\")\n\tprint(\"Choose lambda = \", best_lamb)\n\tsys.stdout.flush()\n\tmodel = LogisticRegression(C=best_lamb,verbose=0, fit_intercept=True, max_iter=1000,class_weight='balanced')\n\tmodel.fit(X_train, Y_train)\n\n\n\tprint('Model parameters:')\n\tprint(model.intercept_, model.coef_)\n\n\tpickle.dump(model, open(MODEL_FINAL_FILE, 'wb'))\n\nif __name__ == '__main__':\n\tprint(judge('doan ke thien cau giay ha noi'))",
"from __future__ import absolute_import\n\nfrom .utils.parameters import *\nfrom .utils.utils import contains_Vietchar, no_accent_vietnamese, tokenize\nfrom nltk import ngrams\n\nimport re\n\nimport numpy as np\ndef levenshtein_ratio_and_distance(s, t, ratio_calc = True):\n\t\"\"\" levenshtein_ratio_and_distance:\n\t\tCalculates levenshtein distance between two strings.\n\t\tIf ratio_calc = True, the function computes the\n\t\tlevenshtein distance ratio of similarity between two strings\n\t\tFor all i and j, distance[i,j] will contain the Levenshtein\n\t\tdistance between the first i characters of s and the\n\t\tfirst j characters of t\n\t\"\"\"\n\trows = len(s)+1\n\tcols = len(t)+1\n\tdistance = np.zeros((rows,cols),dtype = int)\n\n\tfor i in range(1, rows):\n\t\tfor k in range(1,cols):\n\t\t\tdistance[i][0] = i\n\t\t\tdistance[0][k] = k\n \n\tfor col in range(1, cols):\n\t\tfor row in range(1, rows):\n\t\t\tif s[row-1] == t[col-1]:\n\t\t\t\tcost = 0 \n\t\t\telse:\n\t\t\t\tif ratio_calc == True:\n\t\t\t\t\tcost = 2\n\t\t\t\telse:\n\t\t\t\t\tcost = 1\n\t\t\tdistance[row][col] = min(distance[row-1][col] + 1, # Cost of deletions\n\t\t\t\t\t\t\t\t distance[row][col-1] + 1, # Cost of insertions\n\t\t\t\t\t\t\t\t distance[row-1][col-1] + cost) # Cost of substitutions\n\tif ratio_calc == True:\n\t\tRatio = ((len(s)+len(t)) - distance[row][col]) / float(len(s)+len(t))\n\t\treturn Ratio\n\telse:\n\t\treturn distance[row][col]\n\ndef get_ngrams(text, n):\n\tn_grams = ngrams(text, n)\n\treturn [''.join(grams) for grams in n_grams]\n\n\n\ndef jaccard_similarity(string1, string2):\n\tsum = 0\n\tn_gram = 3\n\tlist1 = get_ngrams(re.sub(r'[^\\w\\s]', '', string1.lower()).strip(), n_gram);\n\tlist2 = get_ngrams(re.sub(r'[^\\w\\s]', '', string2.lower()).strip(), n_gram);\n\tintersection = len(list(set(list1).intersection(list2)))\n\tunion = (len(list1) + len(list2)) - intersection\n\tif union == 0:\n\t\treturn float(0)\n\tsum += float(intersection / union)\n\treturn 
float(sum)\n\ndef c_score(string1, string2):\n\tlist2 = string2.split(\", \")\n\tc = 0\n\tfor i in list2:\n\t\tif i in string1:\n\t\t\tc += len(i.split(\" \"))\n\treturn 0\n\ndef tokenize_field(name, field):\n\tif field == 'district':\n\t\tfield = 'DIST'\n\telse:\n\t\tfield = field.upper()\n\twords = tokenize(name)\n\tlabels = []\n\tif (len(words)) != 0:\n\t\tlabels.append('B_' + field)\n\tfor _ in range(len(words)-1):\n\t\tlabels.append('I_' + field)\n\treturn words, labels\n\n\ndef extract_features(raw_add, entities, candidate):\n\tfeatures = {}\n\t#Bias\n\tfeatures.update({'bias': 1})\n\t#Lexical feature\n\n\t#Admin_level in crf\n\tcrf_max_lv = 0\n\tfor entity, field, loc in entities:\n\t\tfeatures.update({'crf:{}:lv'.format(field) : 1})\n\t\tcrf_max_lv = max(crf_max_lv, MAP_LEVEL[field])\n\n\t# features.update({'crf_max_lv' : crf_max_lv})\n\t#Admin_level in candidate\n\tcdd_max_lv = 0\n\tfor field in FIELDS:\n\t\tif field in candidate.keys():\n\t\t\tfeatures.update({'cdd:{}:lv'.format(field) : 1})\n\t\t\tcdd_max_lv = max(cdd_max_lv, MAP_LEVEL[field])\n\n\t# features.update({'cdd_max_lv' : cdd_max_lv})\n\n\t# features.update({'diff_lv': abs(cdd_max_lv - crf_max_lv)})\n\tcvc = contains_Vietchar(raw_add)\n\t# Elastic Score\n\tfor field in FIELDS:\n\t\tif field + '_score' in candidate.keys():\n\t\t\tfeatures.update({'el:{}:s'.format(field) : float(candidate[field+'_score'])} )\n\t# value = 0\n\t# for field in FIELDS:\n\t# \tif field + '_score' in candidate:\n\t# \t\tvalue += candidate[field + '_score']\n\t# features.update({'elastic:score': value})\n\n\t#min admin level \n\tmin_field_cdd = ''\n\tfor field in FIELDS:\n\t\tif field in candidate:\n\t\t\tmin_field_cdd = field\n\n\t#other score\n\tmatched_entities = {'city': 0, 'district': 0, 'ward': 0, 'street': 0}\n\tif cvc == True:\n\t\t#Is contain vietnamese character\n\t\tfeatures.update({'isVietnamese': 1})\n\t\t#Entity Score\n\t\tfor entity, label, loc in entities:\n\t\t\tfor field in FIELDS:\n\t\t\t\tif field 
not in candidate:\n\t\t\t\t\tcontinue\n\t\t\t\tvalue = 1 if entity.lower() == candidate[field] else 0\n\t\t\t\tif field == label:\n\t\t\t\t\tmatched_entities[field] = value\n\t\t\t\tfeatures.update({'{}:{}:{}:{}'.format(loc, label, field, 'en'): value})\n\n\t\t#Jaccard Score\n\t\tfor entity, label, loc in entities:\n\t\t\tfor field in FIELDS:\n\t\t\t\tif field not in candidate:\n\t\t\t\t\tcontinue\n\t\t\t\tvalue = jaccard_similarity(entity, candidate[field])\n\t\t\t\t# if field == label:\n\t\t\t\tfeatures.update({'{}:{}:{}:{}'.format(loc, label, field, 'jc'): value})\n\n\t\t#Levenshtein Score\n\t\tfor entity, label, loc in entities:\n\t\t\tfor field in FIELDS:\n\t\t\t\tif field not in candidate:\n\t\t\t\t\tcontinue\n\t\t\t\tvalue = levenshtein_ratio_and_distance(entity.lower(), candidate[field].lower())\n\t\t\t\t# if field == label:\n\t\t\t\tfeatures.update({'{}:{}:{}:{}'.format(loc, label, field, 'lvt'): value})\n\t\t\n\t\tif min_field_cdd != '':\n\t\t\tif matched_entities[min_field_cdd] == 0:\n\t\t\t\tif candidate[min_field_cdd + '_score'] == 0:\n\t\t\t\t\tfeatures.update({'lost:min_lv': 1})\n\t\t\t\telse:\n\t\t\t\t\t# features.update({'els_cdd:min_lv': candidate[min_field_cdd + '_score']})\n\t\t\t\t\tfor entity, label, loc in entities:\n\t\t\t\t\t\tvalue = 1 if entity.lower() == candidate[min_field_cdd] else 0\n\t\t\t\t\t\tfeatures.update({'rep_min:{}:{}'.format(label,min_field_cdd): value})\n\t\t\t\t\t\tvalue = 1 if candidate[min_field_cdd] in entity.lower() else 0\n\t\t\t\t\t\tfeatures.update({'rep_min:{}:{}:in'.format(label,min_field_cdd): value})\n\n\telse:\n\t\t#Entity Score with no_accent_vietnamese\n\t\tfor entity, label, loc in entities:\n\t\t\tfor field in FIELDS:\n\t\t\t\tif field not in candidate:\n\t\t\t\t\tcontinue\n\t\t\t\tvalue = 1 if no_accent_vietnamese(entity.lower()) == no_accent_vietnamese(candidate[field].lower()) else 0\n\t\t\t\tif field == label:\n\t\t\t\t\tmatched_entities[field] = 
value\n\t\t\t\tfeatures.update({'{}:{}:{}:{}:{}'.format(loc, label, field, 'en', 'nav'): value})\n\t\t\t\t\n\t\t#Jaccard Score with no_accent_vietnamese\n\t\tfor entity, label, loc in entities:\n\t\t\tfor field in FIELDS:\n\t\t\t\tif field not in candidate:\n\t\t\t\t\tcontinue\n\t\t\t\tvalue = jaccard_similarity(no_accent_vietnamese(entity.lower()), no_accent_vietnamese(candidate[field].lower()))\n\t\t\t\t# if field == label:\n\t\t\t\tfeatures.update({'{}:{}:{}:{}:{}'.format(loc, label, field, 'jc', 'nav'): value})\n\t\t#Levenshtein Score with no_accent_vietnamese\n\t\tfor entity, label, loc in entities:\n\t\t\tfor field in FIELDS:\n\t\t\t\tif field not in candidate:\n\t\t\t\t\tcontinue\n\t\t\t\tvalue = levenshtein_ratio_and_distance(no_accent_vietnamese(entity.lower()), no_accent_vietnamese(candidate[field].lower()))\n\t\t\t\t# if field == label:\n\t\t\t\tfeatures.update({'{}:{}:{}:{}:{}'.format(loc, label, field, 'lvt', 'nav'): value})\n\n\t\tif min_field_cdd != '':\n\t\t\tif matched_entities[min_field_cdd] == 0:\n\t\t\t\tif candidate[min_field_cdd + '_score'] == 0:\n\t\t\t\t\tfeatures.update({'lost:min_lv': 1})\n\t\t\t\telse:\n\t\t\t\t\t# features.update({'els_cdd:min_lv': candidate[min_field_cdd + '_score']})\n\t\t\t\t\tfor entity, label, loc in entities:\n\t\t\t\t\t\tvalue = 1 if no_accent_vietnamese(entity.lower()) == no_accent_vietnamese(candidate[min_field_cdd].lower()) else 0\n\t\t\t\t\t\tfeatures.update({'rep_min:{}:{}'.format(label,min_field_cdd): value})\n\t\t\t\t\t\tvalue = 1 if no_accent_vietnamese(candidate[min_field_cdd].lower()) in no_accent_vietnamese(entity.lower()) else 0\n\t\t\t\t\t\tfeatures.update({'rep_min:{}:{}:in'.format(label,min_field_cdd): value})\n\t\t\t\t\n\treturn features"
] |
[
[
"numpy.zeros",
"sklearn.model_selection.train_test_split",
"sklearn.linear_model.LogisticRegression"
],
[
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nils-werner/pymushra
|
[
"ff9d70879223a43b225ef7eb07813dc24545cb80"
] |
[
"pymushra/casting.py"
] |
[
"import pandas as pd\nimport uuid\nimport itertools\nimport datetime\nfrom . import utils\n\n\ndef escape_objects(df, columns=None):\n df = df.copy()\n\n if columns is None:\n columns = [\n ('questionaire', 'uuid'),\n ('wm', 'id',),\n ]\n\n # Add flattened columns too, so we catch JSON and CSV column names\n columns = columns + utils.flatten_columns(columns)\n\n for col in columns:\n try:\n df[col] = df[col].astype(str)\n except KeyError:\n pass\n\n return df\n\n\ndef collection_to_df(collection):\n \"\"\" Transform TinyDB collection to DataFrame\n\n Parameters\n ----------\n collection : TinyDB collection\n The collection to transform. The entire collection is taken.\n\n Returns\n -------\n d : DataFrame\n The DataFrame\n\n Notes\n -----\n\n Turns dataset inside out:\n\n .. code-block:: yaml\n\n Trial: Something\n Questionaire:\n Name: Nils\n Responses: # Notice the list here\n - Stimulus: C3\n Score: 100\n - Stimulus: C1\n Score: 80\n\n must become\n\n .. code-block:: yaml\n\n - Trial: Something # Notice the list here\n Questionaire:\n Name: Nils\n Responses:\n Stimulus: C3\n Score: 100\n - Trial: Something\n Questionaire:\n Name: Nils\n Responses:\n Stimulus: C1\n Score: 80\n\n For each row in responses we need an aditional row in our DataFrame\n\n \"\"\"\n rawdata = list(collection.all())\n\n if not rawdata:\n return pd.DataFrame()\n\n dataset = []\n\n for trial in rawdata:\n for response in trial['responses']:\n outitem = {}\n\n for key, item in response.items():\n outitem[('responses', key)] = item\n\n for key, item in trial['questionaire'].items():\n outitem[('questionaire', key)] = item\n\n for key, item in trial.items():\n if key not in ('responses', 'questionaire'):\n outitem[('wm', key)] = item\n\n outitem[('wm', 'id')] = str(outitem[('wm', 'id')])\n dataset.append(outitem)\n\n columns = list(set(itertools.chain(*map(lambda x: x.keys(), dataset))))\n\n df = pd.DataFrame(\n dataset,\n columns=pd.MultiIndex.from_tuples(columns)\n )\n\n df[('wm', 'date')] = 
pd.to_datetime(df[('wm', 'date')])\n\n return df\n\n\ndef json_to_dict(payload):\n \"\"\" Transform webMUSHRA JSON dict to sane structure\n\n Parameters\n ----------\n payload : dict_like\n The container to be transformed\n\n Returns\n -------\n d : dict_like\n The transformed container\n\n Notes\n -----\n\n Actions taken:\n\n 1. One dataset per trial is generated\n 2. Config from global payload is inserted into all datasets\n 3. TestId from global payload is inserted into all datasets\n 4. date is added to all datasets\n 5. Questionaire structure\n\n .. code-block:: python\n\n {'name': ['firstname', 'age'], 'response': ['Nils', 29]}\n\n becomes\n\n .. code-block:: python\n\n {'firstname': 'Nils', 'age': 29}\n\n 6. UUID4 field is added to questionaire\n\n \"\"\"\n questionaire = payload['participant']\n questionaire = dict(\n zip(questionaire['name'], questionaire['response'])\n )\n questionaire['uuid'] = str(uuid.uuid4())\n insert = []\n\n for trial in payload['trials']:\n data = trial\n\n data['config'] = payload['config']\n data['testId'] = payload['testId']\n data['date'] = str(datetime.datetime.now())\n data['questionaire'] = questionaire\n\n insert.append(data)\n\n return insert\n\n\ndef bool_or_fail(v):\n \"\"\" A special variant of :code:`bool` that raises :code:`ValueError`s\n if the provided value was not :code:`True` or :code:`False`.\n\n This prevents overeager casting like :code:`bool(\"bla\") -> True`\n\n Parameters\n ----------\n v : mixed\n Value to be cast\n\n Returns\n -------\n b : boolean\n The result of the cast\n\n \"\"\"\n try:\n if v.lower() == 'true':\n return True\n elif v.lower() == 'false':\n return True\n except Exception:\n pass\n raise ValueError()\n\n\ndef cast_recursively(d, castto=None):\n \"\"\" Traverse list or dict recursively, trying to cast their items.\n\n Parameters\n ----------\n d : iterable or dict_like\n The container to be casted\n castto : list of callables\n The types to cast to. 
Defaults to :code:`bool_or_fail, int, float`\n\n Returns\n -------\n d : iterable or dict_like\n The transformed container\n\n \"\"\"\n if castto is None:\n castto = (bool_or_fail, int, float)\n\n if isinstance(d, dict):\n return {\n k: cast_recursively(v, castto=castto)\n for k, v in d.items()\n }\n elif isinstance(d, list):\n return [cast_recursively(v, castto=castto) for v in d]\n else:\n for tp in castto:\n try:\n return tp(d)\n except (ValueError, TypeError):\n pass\n return d\n"
] |
[
[
"pandas.to_datetime",
"pandas.MultiIndex.from_tuples",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
MukaJiTrue/incubator-superset
|
[
"e2854994bae0e390bfbf446c94ccb4a86792220b"
] |
[
"tests/celery_tests.py"
] |
[
"\"\"\"Unit tests for Superset Celery worker\"\"\"\nimport json\nimport subprocess\nimport time\nimport unittest\n\nimport pandas as pd\nfrom past.builtins import basestring\n\nfrom superset import app, db\nfrom superset.models.helpers import QueryStatus\nfrom superset.models.sql_lab import Query\nfrom superset.sql_parse import SupersetQuery\nfrom superset.utils.core import get_main_database\nfrom .base_tests import SupersetTestCase\n\n\nBASE_DIR = app.config.get('BASE_DIR')\nCELERY_SLEEP_TIME = 5\n\n\nclass CeleryConfig(object):\n BROKER_URL = app.config.get('CELERY_RESULT_BACKEND')\n CELERY_IMPORTS = ('superset.sql_lab', )\n CELERY_ANNOTATIONS = {'sql_lab.add': {'rate_limit': '10/s'}}\n CONCURRENCY = 1\n\n\napp.config['CELERY_CONFIG'] = CeleryConfig\n\n\nclass UtilityFunctionTests(SupersetTestCase):\n\n # TODO(bkyryliuk): support more cases in CTA function.\n def test_create_table_as(self):\n q = SupersetQuery('SELECT * FROM outer_space;')\n\n self.assertEqual(\n 'CREATE TABLE tmp AS \\nSELECT * FROM outer_space',\n q.as_create_table('tmp'))\n\n self.assertEqual(\n 'DROP TABLE IF EXISTS tmp;\\n'\n 'CREATE TABLE tmp AS \\nSELECT * FROM outer_space',\n q.as_create_table('tmp', overwrite=True))\n\n # now without a semicolon\n q = SupersetQuery('SELECT * FROM outer_space')\n self.assertEqual(\n 'CREATE TABLE tmp AS \\nSELECT * FROM outer_space',\n q.as_create_table('tmp'))\n\n # now a multi-line query\n multi_line_query = (\n 'SELECT * FROM planets WHERE\\n'\n \"Luke_Father = 'Darth Vader'\")\n q = SupersetQuery(multi_line_query)\n self.assertEqual(\n 'CREATE TABLE tmp AS \\nSELECT * FROM planets WHERE\\n'\n \"Luke_Father = 'Darth Vader'\",\n q.as_create_table('tmp'),\n )\n\n\nclass CeleryTestCase(SupersetTestCase):\n def __init__(self, *args, **kwargs):\n super(CeleryTestCase, self).__init__(*args, **kwargs)\n self.client = app.test_client()\n\n def get_query_by_name(self, sql):\n session = db.session\n query = session.query(Query).filter_by(sql=sql).first()\n 
session.close()\n return query\n\n def get_query_by_id(self, id):\n session = db.session\n query = session.query(Query).filter_by(id=id).first()\n session.close()\n return query\n\n @classmethod\n def setUpClass(cls):\n db.session.query(Query).delete()\n db.session.commit()\n\n worker_command = BASE_DIR + '/bin/superset worker -w 2'\n subprocess.Popen(\n worker_command, shell=True, stdout=subprocess.PIPE)\n\n @classmethod\n def tearDownClass(cls):\n subprocess.call(\n \"ps auxww | grep 'celeryd' | awk '{print $2}' | xargs kill -9\",\n shell=True,\n )\n subprocess.call(\n \"ps auxww | grep 'superset worker' | awk '{print $2}' | xargs kill -9\",\n shell=True,\n )\n\n def run_sql(self, db_id, sql, client_id=None, cta='false', tmp_table='tmp',\n async_='false'):\n self.login()\n resp = self.client.post(\n '/superset/sql_json/',\n data=dict(\n database_id=db_id,\n sql=sql,\n runAsync=async_,\n select_as_cta=cta,\n tmp_table_name=tmp_table,\n client_id=client_id,\n ),\n )\n self.logout()\n return json.loads(resp.data.decode('utf-8'))\n\n def test_run_sync_query_dont_exist(self):\n main_db = get_main_database(db.session)\n db_id = main_db.id\n sql_dont_exist = 'SELECT name FROM table_dont_exist'\n result1 = self.run_sql(db_id, sql_dont_exist, '1', cta='true')\n self.assertTrue('error' in result1)\n\n def test_run_sync_query_cta(self):\n main_db = get_main_database(db.session)\n db_id = main_db.id\n eng = main_db.get_sqla_engine()\n tmp_table_name = 'tmp_async_22'\n self.drop_table_if_exists(tmp_table_name, main_db)\n perm_name = 'can_sql_json'\n sql_where = (\n \"SELECT name FROM ab_permission WHERE name='{}'\".format(perm_name))\n result2 = self.run_sql(\n db_id, sql_where, '2', tmp_table=tmp_table_name, cta='true')\n self.assertEqual(QueryStatus.SUCCESS, result2['query']['state'])\n self.assertEqual([], result2['data'])\n self.assertEqual([], result2['columns'])\n query2 = self.get_query_by_id(result2['query']['serverId'])\n\n # Check the data in the tmp table.\n df2 = 
pd.read_sql_query(sql=query2.select_sql, con=eng)\n data2 = df2.to_dict(orient='records')\n self.assertEqual([{'name': perm_name}], data2)\n\n def test_run_sync_query_cta_no_data(self):\n main_db = get_main_database(db.session)\n db_id = main_db.id\n sql_empty_result = 'SELECT * FROM ab_user WHERE id=666'\n result3 = self.run_sql(db_id, sql_empty_result, '3')\n self.assertEqual(QueryStatus.SUCCESS, result3['query']['state'])\n self.assertEqual([], result3['data'])\n self.assertEqual([], result3['columns'])\n\n query3 = self.get_query_by_id(result3['query']['serverId'])\n self.assertEqual(QueryStatus.SUCCESS, query3.status)\n\n def drop_table_if_exists(self, table_name, database=None):\n \"\"\"Drop table if it exists, works on any DB\"\"\"\n sql = 'DROP TABLE {}'.format(table_name)\n db_id = database.id\n if database:\n database.allow_dml = True\n db.session.flush()\n return self.run_sql(db_id, sql)\n\n def test_run_async_query(self):\n main_db = get_main_database(db.session)\n db_id = main_db.id\n\n self.drop_table_if_exists('tmp_async_1', main_db)\n\n sql_where = \"SELECT name FROM ab_role WHERE name='Admin'\"\n result = self.run_sql(\n db_id, sql_where, '4', async_='true', tmp_table='tmp_async_1',\n cta='true')\n assert result['query']['state'] in (\n QueryStatus.PENDING, QueryStatus.RUNNING, QueryStatus.SUCCESS)\n\n time.sleep(CELERY_SLEEP_TIME)\n\n query = self.get_query_by_id(result['query']['serverId'])\n self.assertEqual(QueryStatus.SUCCESS, query.status)\n self.assertTrue('FROM tmp_async_1' in query.select_sql)\n self.assertEqual(\n 'CREATE TABLE tmp_async_1 AS \\nSELECT name FROM ab_role '\n \"WHERE name='Admin' LIMIT 666\", query.executed_sql)\n self.assertEqual(sql_where, query.sql)\n self.assertEqual(0, query.rows)\n self.assertEqual(False, query.limit_used)\n self.assertEqual(True, query.select_as_cta)\n self.assertEqual(True, query.select_as_cta_used)\n\n def test_run_async_query_with_lower_limit(self):\n main_db = get_main_database(db.session)\n 
db_id = main_db.id\n self.drop_table_if_exists('tmp_async_2', main_db)\n\n sql_where = \"SELECT name FROM ab_role WHERE name='Alpha' LIMIT 1\"\n result = self.run_sql(\n db_id, sql_where, '5', async_='true', tmp_table='tmp_async_2',\n cta='true')\n assert result['query']['state'] in (\n QueryStatus.PENDING, QueryStatus.RUNNING, QueryStatus.SUCCESS)\n\n time.sleep(CELERY_SLEEP_TIME)\n\n query = self.get_query_by_id(result['query']['serverId'])\n self.assertEqual(QueryStatus.SUCCESS, query.status)\n self.assertTrue('FROM tmp_async_2' in query.select_sql)\n self.assertEqual(\n 'CREATE TABLE tmp_async_2 AS \\nSELECT name FROM ab_role '\n \"WHERE name='Alpha' LIMIT 1\", query.executed_sql)\n self.assertEqual(sql_where, query.sql)\n self.assertEqual(0, query.rows)\n self.assertEqual(1, query.limit)\n self.assertEqual(True, query.select_as_cta)\n self.assertEqual(True, query.select_as_cta_used)\n\n @staticmethod\n def de_unicode_dict(d):\n def str_if_basestring(o):\n if isinstance(o, basestring):\n return str(o)\n return o\n return {str_if_basestring(k): str_if_basestring(d[k]) for k in d}\n\n @classmethod\n def dictify_list_of_dicts(cls, l, k):\n return {str(o[k]): cls.de_unicode_dict(o) for o in l}\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"pandas.read_sql_query"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
haipinglu/nilearn
|
[
"82a9791f4428389b3a57b82cc57406a3a5cb50b1",
"82a9791f4428389b3a57b82cc57406a3a5cb50b1"
] |
[
"nilearn/plotting/html_surface.py",
"nilearn/plotting/html_stat_map.py"
] |
[
"import json\nimport collections\n\nimport numpy as np\nimport matplotlib as mpl\nfrom matplotlib import cm as mpl_cm\n\nfrom .._utils.niimg_conversions import check_niimg_3d\nfrom .. import datasets, surface\nfrom . import cm\nfrom .js_plotting_utils import (\n HTMLDocument, colorscale, mesh_to_plotly, get_html_template, add_js_lib,\n to_color_strings)\n\n\nclass SurfaceView(HTMLDocument):\n pass\n\n\ndef _get_vertexcolor(surf_map, cmap, norm,\n absolute_threshold=None, bg_map=None):\n vertexcolor = cmap(norm(surf_map).data)\n if absolute_threshold is None:\n return to_color_strings(vertexcolor)\n if bg_map is None:\n bg_map = np.ones(len(surf_map)) * .5\n bg_vmin, bg_vmax = 0, 1\n else:\n bg_map = surface.load_surf_data(bg_map)\n bg_vmin, bg_vmax = np.min(bg_map), np.max(bg_map)\n bg_norm = mpl.colors.Normalize(vmin=bg_vmin, vmax=bg_vmax)\n bg_color = mpl_cm.get_cmap('Greys')(bg_norm(bg_map))\n vertexcolor[np.abs(surf_map) < absolute_threshold] = bg_color[\n np.abs(surf_map) < absolute_threshold]\n return to_color_strings(vertexcolor)\n\n\ndef one_mesh_info(surf_map, surf_mesh, threshold=None, cmap=cm.cold_hot,\n black_bg=False, bg_map=None, symmetric_cmap=True,\n vmax=None):\n \"\"\"\n Prepare info for plotting one surface map on a single mesh.\n\n\n This computes the dictionary that gets inserted in the web page,\n which contains the encoded mesh, colors, min and max values, and\n background color.\n\n \"\"\"\n info = {}\n colors = colorscale(\n cmap, surf_map, threshold, symmetric_cmap=symmetric_cmap, vmax=vmax)\n info['inflated_left'] = mesh_to_plotly(surf_mesh)\n info['vertexcolor_left'] = _get_vertexcolor(\n surf_map, colors['cmap'], colors['norm'],\n colors['abs_threshold'], bg_map)\n info[\"cmin\"], info[\"cmax\"] = float(colors['vmin']), float(colors['vmax'])\n info['black_bg'] = black_bg\n info['full_brain_mesh'] = False\n info['colorscale'] = colors['colors']\n return info\n\n\ndef _check_mesh(mesh):\n if isinstance(mesh, str):\n return 
datasets.fetch_surf_fsaverage(mesh)\n if not isinstance(mesh, collections.Mapping):\n raise TypeError(\"The mesh should be a str or a dictionary, \"\n \"you provided: {}.\".format(type(mesh).__name__))\n missing = {'pial_left', 'pial_right', 'sulc_left', 'sulc_right',\n 'infl_left', 'infl_right'}.difference(mesh.keys())\n if missing:\n raise ValueError(\n \"{} {} missing from the provided mesh dictionary\".format(\n missing, ('are' if len(missing) > 1 else 'is')))\n return mesh\n\n\ndef full_brain_info(volume_img, mesh='fsaverage5', threshold=None,\n cmap=cm.cold_hot, black_bg=False, symmetric_cmap=True,\n vmax=None, vol_to_surf_kwargs={}):\n \"\"\"\n Project 3D map on cortex; prepare info to plot both hemispheres.\n\n\n This computes the dictionary that gets inserted in the web page,\n which contains encoded meshes, colors, min and max values, and\n background color.\n\n \"\"\"\n info = {}\n mesh = _check_mesh(mesh)\n surface_maps = {\n h: surface.vol_to_surf(volume_img, mesh['pial_{}'.format(h)],\n **vol_to_surf_kwargs)\n for h in ['left', 'right']\n }\n colors = colorscale(\n cmap, np.asarray(list(surface_maps.values())).ravel(), threshold,\n symmetric_cmap=symmetric_cmap, vmax=vmax)\n\n for hemi, surf_map in surface_maps.items():\n bg_map = surface.load_surf_data(mesh['sulc_{}'.format(hemi)])\n info['pial_{}'.format(hemi)] = mesh_to_plotly(\n mesh['pial_{}'.format(hemi)])\n info['inflated_{}'.format(hemi)] = mesh_to_plotly(\n mesh['infl_{}'.format(hemi)])\n\n info['vertexcolor_{}'.format(hemi)] = _get_vertexcolor(\n surf_map, colors['cmap'], colors['norm'],\n colors['abs_threshold'], bg_map)\n info[\"cmin\"], info[\"cmax\"] = float(colors['vmin']), float(colors['vmax'])\n info['black_bg'] = black_bg\n info['full_brain_mesh'] = True\n info['colorscale'] = colors['colors']\n return info\n\n\ndef _fill_html_template(info, embed_js=True):\n as_json = json.dumps(info)\n as_html = get_html_template('surface_plot_template.html').safe_substitute(\n 
{'INSERT_STAT_MAP_JSON_HERE': as_json})\n as_html = add_js_lib(as_html, embed_js=embed_js)\n return SurfaceView(as_html)\n\n\ndef view_img_on_surf(stat_map_img, surf_mesh='fsaverage5',\n threshold=None, cmap=cm.cold_hot,\n black_bg=False, vmax=None):\n \"\"\"\n Insert a surface plot of a statistical map into an HTML page.\n\n Parameters\n ----------\n stat_map_img : Niimg-like object, 3D\n See http://nilearn.github.io/manipulating_images/input_output.html\n\n surf_mesh : str or dict, optional.\n if 'fsaverage5', use fsaverage5 mesh from nilearn.datasets\n if 'fsaverage', use fsaverage mesh from nilearn.datasets\n if a dictionary, it should have the same structure as those returned by\n nilearn.datasets.fetch_surf_fsaverage, i.e. keys should be 'infl_left',\n 'pial_left', 'sulc_left', 'infl_right', 'pial_right', and 'sulc_right',\n containing inflated and pial meshes, and sulcal depth values for left\n and right hemispheres.\n\n threshold : str, number or None, optional (default=None)\n If None, no thresholding.\n If it is a number only values of amplitude greater\n than threshold will be shown.\n If it is a string it must finish with a percent sign,\n e.g. \"25.3%\", and only values of amplitude above the\n given percentile will be shown.\n\n cmap : str or matplotlib colormap, optional\n\n black_bg : bool, optional (default=False)\n If True, image is plotted on a black background. Otherwise on a\n white background.\n\n vmax : float or None, optional (default=None)\n upper bound for the colorbar. if None, use the absolute max of the\n brain map.\n\n Returns\n -------\n SurfaceView : plot of the stat map.\n It can be saved as an html page or rendered (transparently) by the\n Jupyter notebook. 
Useful methods are :\n\n - 'resize' to resize the plot displayed in a Jupyter notebook\n - 'save_as_html' to save the plot to a file\n - 'open_in_browser' to save the plot and open it in a web browser.\n\n See Also\n --------\n nilearn.plotting.view_surf: plot from a surface map on a cortical mesh.\n\n \"\"\"\n stat_map_img = check_niimg_3d(stat_map_img)\n info = full_brain_info(\n volume_img=stat_map_img, mesh=surf_mesh, threshold=threshold,\n cmap=cmap, black_bg=black_bg, vmax=vmax)\n return _fill_html_template(info, embed_js=True)\n\n\ndef view_surf(surf_mesh, surf_map=None, bg_map=None, threshold=None,\n cmap=cm.cold_hot, black_bg=False, vmax=None,\n symmetric_cmap=True):\n \"\"\"\n Insert a surface plot of a surface map into an HTML page.\n\n Parameters\n ----------\n surf_mesh: str or list of two numpy.ndarray\n Surface mesh geometry, can be a file (valid formats are\n .gii or Freesurfer specific files such as .orig, .pial,\n .sphere, .white, .inflated) or\n a list of two Numpy arrays, the first containing the x-y-z coordinates\n of the mesh vertices, the second containing the indices\n (into coords) of the mesh faces.\n\n surf_map: str or numpy.ndarray, optional.\n Data to be displayed on the surface mesh. Can be a file (valid formats\n are .gii, .mgz, .nii, .nii.gz, or Freesurfer specific files such as\n .thickness, .curv, .sulc, .annot, .label) or\n a Numpy array\n\n bg_map: Surface data, optional,\n Background image to be plotted on the mesh underneath the\n surf_data in greyscale, most likely a sulcal depth map for\n realistic shading.\n\n threshold : str, number or None, optional (default=None)\n If None, no thresholding.\n If it is a number only values of amplitude greater\n than threshold will be shown.\n If it is a string it must finish with a percent sign,\n e.g. 
\"25.3%\", and only values of amplitude above the\n given percentile will be shown.\n\n cmap : str or matplotlib colormap, optional\n You might want to change it to 'gnist_ncar' if plotting a\n surface atlas.\n\n black_bg : bool, optional (default=False)\n If True, image is plotted on a black background. Otherwise on a\n white background.\n\n symmetric_cmap : bool, optional (default=True)\n Make colormap symmetric (ranging from -vmax to vmax).\n Set it to False if you are plotting a surface atlas.\n\n vmax : float or None, optional (default=None)\n upper bound for the colorbar. if None, use the absolute max of the\n brain map.\n\n Returns\n -------\n SurfaceView : plot of the stat map.\n It can be saved as an html page or rendered (transparently) by the\n Jupyter notebook. Useful methods are :\n\n - 'resize' to resize the plot displayed in a Jupyter notebook\n - 'save_as_html' to save the plot to a file\n - 'open_in_browser' to save the plot and open it in a web browser.\n\n See Also\n --------\n nilearn.plotting.view_img_on_surf: Surface plot from a 3D statistical map.\n\n \"\"\"\n surf_mesh = surface.load_surf_mesh(surf_mesh)\n if surf_map is None:\n surf_map = np.ones(len(surf_mesh[0]))\n if surf_map is not None:\n surface.check_mesh_and_data(surf_mesh, surf_map)\n if bg_map is not None:\n surface.check_mesh_and_data(surf_mesh, bg_map)\n info = one_mesh_info(\n surf_map=surf_map, surf_mesh=surf_mesh, threshold=threshold,\n cmap=cmap, black_bg=black_bg, bg_map=bg_map,\n symmetric_cmap=symmetric_cmap, vmax=vmax)\n return _fill_html_template(info, embed_js=True)\n",
"\"\"\"\nVisualizing 3D stat maps in a Brainsprite viewer\n\"\"\"\nimport os\nimport json\nfrom io import BytesIO\n\nimport numpy as np\nfrom matplotlib.image import imsave\n\nfrom nibabel.affines import apply_affine\n\nfrom ..image import resample_to_img, new_img_like, reorder_img\nfrom .js_plotting_utils import get_html_template, HTMLDocument, colorscale\nfrom ..plotting import cm\nfrom ..plotting.find_cuts import find_xyz_cut_coords\nfrom ..plotting.img_plotting import _load_anat\nfrom .._utils.niimg_conversions import check_niimg_3d\nfrom .._utils.param_validation import check_threshold\nfrom .._utils.extmath import fast_abs_percentile\nfrom .._utils.niimg import _safe_get_data\nfrom .._utils.compat import _encodebytes\nfrom ..datasets import load_mni152_template\n\n\ndef _data_to_sprite(data):\n \"\"\" Convert a 3D array into a sprite of sagittal slices.\n Returns: sprite (2D numpy array)\n If each sagittal slice is nz (height) x ny (width) pixels, the sprite\n size is (M x nz) x (N x ny), where M and N are computed to be roughly\n equal. All slices are pasted together row by row, from top left to\n bottom right. 
The last row is completed with empty slices.\n \"\"\"\n\n nx, ny, nz = data.shape\n nrows = int(np.ceil(np.sqrt(nx)))\n ncolumns = int(np.ceil(nx / float(nrows)))\n\n sprite = np.zeros((nrows * nz, ncolumns * ny))\n indrow, indcol = np.where(np.ones((nrows, ncolumns)))\n\n for xx in range(nx):\n # we need to flip the image in the x axis\n sprite[(indrow[xx] * nz):((indrow[xx] + 1) * nz), (indcol[xx] * ny):\n ((indcol[xx] + 1) * ny)] = data[xx, :, ::-1].transpose()\n\n return sprite\n\n\ndef _threshold_data(data, threshold=None):\n \"\"\" Threshold a data array.\n Returns: data (masked array), threshold (updated)\n \"\"\"\n # If threshold is None, do nothing\n if threshold is None:\n return data, threshold\n\n # Deal with automatic settings of plot parameters\n if threshold == 'auto':\n # Threshold epsilon below a percentile value, to be sure that some\n # voxels pass the threshold\n threshold = fast_abs_percentile(data) - 1e-5\n\n # Threshold\n threshold = check_threshold(threshold, data,\n percentile_func=fast_abs_percentile,\n name='threshold')\n\n # Mask data\n if threshold == 0:\n data = np.ma.masked_equal(data, 0, copy=False)\n else:\n data = np.ma.masked_inside(data, -threshold, threshold, copy=False)\n return data, threshold\n\n\ndef _save_sprite(data, output_sprite, vmax, vmin, mask=None, cmap='Greys',\n format='png'):\n \"\"\" Generate a sprite from a 3D Niimg-like object.\n Returns: sprite\n \"\"\"\n\n # Create sprite\n sprite = _data_to_sprite(data)\n\n # Mask the sprite\n if mask is not None:\n mask = _data_to_sprite(mask)\n sprite = np.ma.array(sprite, mask=mask)\n\n # Save the sprite\n imsave(output_sprite, sprite, vmin=vmin, vmax=vmax, cmap=cmap,\n format=format)\n\n return sprite\n\n\ndef _bytesIO_to_base64(handle_io):\n \"\"\" Encode the content of a bytesIO virtual file as base64.\n Also closes the file.\n Returns: data\n \"\"\"\n handle_io.seek(0)\n data = _encodebytes(handle_io.read()).decode('utf-8')\n handle_io.close()\n return data\n\n\ndef 
_save_cm(output_cmap, cmap, format='png', n_colors=256):\n \"\"\" Save the colormap of an image as an image file.\n \"\"\"\n\n # save the colormap\n data = np.arange(0., n_colors) / (n_colors - 1.)\n data = data.reshape([1, n_colors])\n imsave(output_cmap, data, cmap=cmap, format=format)\n\n\nclass StatMapView(HTMLDocument):\n pass\n\n\ndef _mask_stat_map(stat_map_img, threshold=None):\n \"\"\" Load a stat map and apply a threshold.\n Returns: mask_img, stat_map_img, data, threshold\n \"\"\"\n # Load stat map\n stat_map_img = check_niimg_3d(stat_map_img, dtype='auto')\n data = _safe_get_data(stat_map_img, ensure_finite=True)\n\n # threshold the stat_map\n if threshold is not None:\n data, threshold = _threshold_data(data, threshold)\n mask_img = new_img_like(stat_map_img, data.mask, stat_map_img.affine)\n else:\n mask_img = new_img_like(stat_map_img, np.zeros(data.shape),\n stat_map_img.affine)\n return mask_img, stat_map_img, data, threshold\n\n\ndef _load_bg_img(stat_map_img, bg_img='MNI152', black_bg='auto', dim='auto'):\n \"\"\" Load and resample bg_img in an isotropic resolution,\n with a positive diagonal affine matrix.\n Returns: bg_img, bg_min, bg_max, black_bg\n \"\"\"\n if (bg_img is None or bg_img is False) and black_bg == 'auto':\n black_bg = False\n\n if bg_img is not None and bg_img is not False:\n if isinstance(bg_img, str) and bg_img == \"MNI152\":\n bg_img = load_mni152_template()\n bg_img, black_bg, bg_min, bg_max = _load_anat(bg_img, dim=dim,\n black_bg=black_bg)\n else:\n bg_img = new_img_like(stat_map_img, np.zeros(stat_map_img.shape),\n stat_map_img.affine)\n bg_min = 0\n bg_max = 0\n bg_img = reorder_img(bg_img, resample='nearest')\n return bg_img, bg_min, bg_max, black_bg\n\n\ndef _resample_stat_map(stat_map_img, bg_img, mask_img,\n resampling_interpolation='continuous'):\n \"\"\" Resample the stat map and mask to the background.\n Returns: stat_map_img, mask_img\n \"\"\"\n stat_map_img = resample_to_img(stat_map_img, bg_img,\n 
interpolation=resampling_interpolation)\n mask_img = resample_to_img(mask_img, bg_img, fill_value=1,\n interpolation='nearest')\n\n return stat_map_img, mask_img\n\n\ndef _json_view_params(shape, affine, vmin, vmax, cut_slices, black_bg=False,\n opacity=1, draw_cross=True, annotate=True, title=None,\n colorbar=True, value=True):\n \"\"\" Create a dictionary with all the brainsprite parameters.\n Returns: params\n \"\"\"\n\n # Set color parameters\n if black_bg:\n cfont = '#FFFFFF'\n cbg = '#000000'\n else:\n cfont = '#000000'\n cbg = '#FFFFFF'\n\n # Deal with limitations of json dump regarding types\n if type(vmin).__module__ == 'numpy':\n vmin = vmin.tolist() # json does not deal with numpy array\n if type(vmax).__module__ == 'numpy':\n vmax = vmax.tolist() # json does not deal with numpy array\n\n params = {'canvas': '3Dviewer',\n 'sprite': 'spriteImg',\n 'nbSlice': {'X': shape[0],\n 'Y': shape[1],\n 'Z': shape[2]},\n 'overlay': {'sprite': 'overlayImg',\n 'nbSlice': {'X': shape[0],\n 'Y': shape[1],\n 'Z': shape[2]},\n 'opacity': opacity},\n 'colorBackground': cbg,\n 'colorFont': cfont,\n 'crosshair': draw_cross,\n 'affine': affine.tolist(),\n 'flagCoordinates': annotate,\n 'title': title,\n 'flagValue': value,\n 'numSlice': {'X': cut_slices[0] - 1,\n 'Y': cut_slices[1] - 1,\n 'Z': cut_slices[2] - 1}}\n\n if colorbar:\n params['colorMap'] = {'img': 'colorMap',\n 'min': vmin,\n 'max': vmax}\n return params\n\n\ndef _json_view_size(params):\n \"\"\" Define the size of the viewer.\n Returns: width_view, height_view\n \"\"\"\n # slices_width = sagittal_width (y) + coronal_width (x) + axial_width (x)\n slices_width = params['nbSlice']['Y'] + 2 * params['nbSlice']['X']\n\n # slices_height = max of sagittal_height (z), coronal_height (z), and\n # axial_height (y).\n # Also add 20% extra height for annotation and margin\n slices_height = np.max([params['nbSlice']['Y'], params['nbSlice']['Z']])\n slices_height = 1.20 * slices_height\n\n # Get the final size of the viewer\n 
width_view = 600\n ratio = slices_height / slices_width\n height_view = np.ceil(ratio * width_view)\n\n return width_view, height_view\n\n\ndef _json_view_data(bg_img, stat_map_img, mask_img, bg_min, bg_max, colors,\n cmap, colorbar):\n \"\"\" Create a json-like viewer object, and populate with base64 data.\n Returns: json_view\n \"\"\"\n # Initialise brainsprite data structure\n json_view = dict.fromkeys(['bg_base64', 'stat_map_base64', 'cm_base64',\n 'params', 'js_jquery', 'js_brainsprite'])\n\n # Create a base64 sprite for the background\n bg_sprite = BytesIO()\n bg_data = _safe_get_data(bg_img, ensure_finite=True)\n _save_sprite(bg_data, bg_sprite, bg_max, bg_min, None, 'gray', 'png')\n json_view['bg_base64'] = _bytesIO_to_base64(bg_sprite)\n\n # Create a base64 sprite for the stat map\n stat_map_sprite = BytesIO()\n data = _safe_get_data(stat_map_img, ensure_finite=True)\n mask = _safe_get_data(mask_img, ensure_finite=True)\n _save_sprite(data, stat_map_sprite, colors['vmax'], colors['vmin'],\n mask, cmap, 'png')\n json_view['stat_map_base64'] = _bytesIO_to_base64(stat_map_sprite)\n\n # Create a base64 colormap\n if colorbar:\n stat_map_cm = BytesIO()\n _save_cm(stat_map_cm, colors['cmap'], 'png')\n json_view['cm_base64'] = _bytesIO_to_base64(stat_map_cm)\n else:\n json_view['cm_base64'] = ''\n\n return json_view\n\n\ndef _json_view_to_html(json_view):\n \"\"\" Fill a brainsprite html template with relevant parameters and data.\n Returns: html_view\n \"\"\"\n\n # Fix the size of the viewer\n width, height = _json_view_size(json_view['params'])\n\n # Populate all missing keys with html-ready data\n json_view['params'] = json.dumps(json_view['params'])\n js_dir = os.path.join(os.path.dirname(__file__), 'data', 'js')\n with open(os.path.join(js_dir, 'jquery.min.js')) as f:\n json_view['js_jquery'] = f.read()\n with open(os.path.join(js_dir, 'brainsprite.min.js')) as f:\n json_view['js_brainsprite'] = f.read()\n\n # Load the html template, and plug in all the 
data\n html_view = get_html_template('stat_map_template.html')\n html_view = html_view.safe_substitute(json_view)\n\n return StatMapView(html_view, width=width, height=height)\n\n\ndef _get_cut_slices(stat_map_img, cut_coords=None, threshold=None):\n \"\"\" For internal use.\n Find slice numbers for the cut.\n Based on find_xyz_cut_coords\n \"\"\"\n # Select coordinates for the cut\n if cut_coords is None:\n cut_coords = find_xyz_cut_coords(\n stat_map_img, activation_threshold=threshold)\n\n # Convert cut coordinates into cut slices\n try:\n cut_slices = apply_affine(np.linalg.inv(stat_map_img.affine),\n cut_coords)\n except ValueError:\n raise ValueError(\n \"The input given for display_mode='ortho' needs to be \"\n \"a list of 3d world coordinates in (x, y, z). \"\n \"You provided cut_coords={0}\".format(cut_coords))\n except IndexError:\n raise ValueError(\n \"The input given for display_mode='ortho' needs to be \"\n \"a list of 3d world coordinates in (x, y, z). \"\n \"You provided single cut, cut_coords={0}\".format(cut_coords))\n\n return cut_slices\n\n\ndef view_img(stat_map_img, bg_img='MNI152',\n cut_coords=None,\n colorbar=True,\n title=None,\n threshold=1e-6,\n annotate=True,\n draw_cross=True,\n black_bg='auto',\n cmap=cm.cold_hot,\n symmetric_cmap=True,\n dim='auto',\n vmax=None,\n vmin=None,\n resampling_interpolation='continuous',\n opacity=1,\n **kwargs\n ):\n \"\"\"\n Interactive html viewer of a statistical map, with optional background\n\n Parameters\n ----------\n stat_map_img : Niimg-like object\n See http://nilearn.github.io/manipulating_images/input_output.html\n The statistical map image. 
Can be either a 3D volume or a 4D volume\n with exactly one time point.\n bg_img : Niimg-like object (default='MNI152')\n See http://nilearn.github.io/manipulating_images/input_output.html\n The background image that the stat map will be plotted on top of.\n If nothing is specified, the MNI152 template will be used.\n To turn off background image, just pass \"bg_img=False\".\n cut_coords : None, or a tuple of floats (default None)\n The MNI coordinates of the point where the cut is performed\n as a 3-tuple: (x, y, z). If None is given, the cuts are calculated\n automaticaly.\n colorbar : boolean, optional (default True)\n If True, display a colorbar on top of the plots.\n title : string or None (default=None)\n The title displayed on the figure (or None: no title).\n threshold : string, number or None (default=1e-6)\n If None is given, the image is not thresholded.\n If a string of the form \"90%\" is given, use the 90-th percentile of\n the absolute value in the image.\n If a number is given, it is used to threshold the image:\n values below the threshold (in absolute value) are plotted\n as transparent. If auto is given, the threshold is determined\n automatically.\n annotate : boolean (default=True)\n If annotate is True, current cuts and value of the map are added to the\n viewer.\n draw_cross : boolean (default=True)\n If draw_cross is True, a cross is drawn on the plot to\n indicate the cuts.\n black_bg : boolean (default='auto')\n If True, the background of the image is set to be black.\n Otherwise, a white background is used.\n If set to auto, an educated guess is made to find if the background\n is white or black.\n cmap : matplotlib colormap, optional\n The colormap for specified image.\n symmetric_cmap : bool, optional (default=True)\n True: make colormap symmetric (ranging from -vmax to vmax).\n False: the colormap will go from the minimum of the volume to vmax.\n Set it to False if you are plotting a positive volume, e.g. 
an atlas\n or an anatomical image.\n dim : float, 'auto' (default='auto')\n Dimming factor applied to background image. By default, automatic\n heuristics are applied based upon the background image intensity.\n Accepted float values, where a typical scan is between -2 and 2\n (-2 = increase constrast; 2 = decrease contrast), but larger values\n can be used for a more pronounced effect. 0 means no dimming.\n vmax : float, or None (default=None)\n max value for mapping colors.\n If vmax is None and symmetric_cmap is True, vmax is the max\n absolute value of the volume.\n If vmax is None and symmetric_cmap is False, vmax is the max\n value of the volume.\n vmin : float, or None (default=None)\n min value for mapping colors.\n If `symmetric_cmap` is `True`, `vmin` is always equal to `-vmax` and\n cannot be chosen.\n If `symmetric_cmap` is `False`, `vmin` defaults to the min of the\n image, or 0 when a threshold is used.\n resampling_interpolation : string, optional (default continuous)\n The interpolation method for resampling.\n Can be 'continuous', 'linear', or 'nearest'.\n See nilearn.image.resample_img\n opacity : float in [0,1] (default 1)\n The level of opacity of the overlay (0: transparent, 1: opaque)\n\n Returns\n -------\n html_view : the html viewer object.\n It can be saved as an html page `html_view.save_as_html('test.html')`,\n or opened in a browser `html_view.open_in_browser()`.\n If the output is not requested and the current environment is a Jupyter\n notebook, the viewer will be inserted in the notebook.\n\n See Also\n --------\n nilearn.plotting.plot_stat_map:\n static plot of brain volume, on a single or multiple planes.\n nilearn.plotting.view_connectome:\n interactive 3d view of a connectome.\n nilearn.plotting.view_markers:\n interactive plot of colored markers.\n nilearn.plotting.view_surf, nilearn.plotting.view_img_on_surf:\n interactive view of statistical maps or surface atlases on the cortical\n surface.\n \"\"\"\n\n # Prepare the color 
map and thresholding\n mask_img, stat_map_img, data, threshold = _mask_stat_map(\n stat_map_img, threshold)\n colors = colorscale(cmap, data.ravel(), threshold=threshold,\n symmetric_cmap=symmetric_cmap, vmax=vmax,\n vmin=vmin)\n\n # Prepare the data for the cuts\n bg_img, bg_min, bg_max, black_bg = _load_bg_img(stat_map_img, bg_img,\n black_bg, dim)\n stat_map_img, mask_img = _resample_stat_map(stat_map_img, bg_img, mask_img,\n resampling_interpolation)\n cut_slices = _get_cut_slices(stat_map_img, cut_coords, threshold)\n\n # Now create a json-like object for the viewer, and converts in html\n json_view = _json_view_data(bg_img, stat_map_img, mask_img, bg_min, bg_max,\n colors, cmap, colorbar)\n json_view['params'] = _json_view_params(\n stat_map_img.shape, stat_map_img.affine, colors['vmin'],\n colors['vmax'], cut_slices, black_bg, opacity, draw_cross, annotate,\n title, colorbar, value=False)\n html_view = _json_view_to_html(json_view)\n\n return html_view\n"
] |
[
[
"numpy.abs",
"numpy.min",
"matplotlib.colors.Normalize",
"numpy.max",
"matplotlib.cm.get_cmap"
],
[
"numpy.ma.masked_inside",
"matplotlib.image.imsave",
"numpy.sqrt",
"numpy.linalg.inv",
"numpy.arange",
"numpy.ones",
"numpy.max",
"numpy.ceil",
"numpy.ma.masked_equal",
"numpy.ma.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
lrahmani/agents-aea
|
[
"9bd1d51530fc21bf41b5adea031cda19a94b048b",
"9bd1d51530fc21bf41b5adea031cda19a94b048b"
] |
[
"packages/fetchai/skills/tac_control_contract/helpers.py",
"packages/fetchai/skills/ml_train/model.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------------------\n#\n# Copyright 2018-2019 Fetch.AI Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ------------------------------------------------------------------------------\n\n\"\"\"This module contains the helpers methods for the controller agent.\"\"\"\n\nimport random\nfrom typing import Dict, List, Tuple, cast\n\nimport numpy as np\n\nfrom aea.contracts.ethereum import Contract\n\nQUANTITY_SHIFT = 1 # Any non-negative integer is fine.\nTOKEN_TYPE = 2\n\n\ndef generate_good_id_to_name(nb_goods: int, contract: Contract) -> Dict[str, str]:\n \"\"\"\n Generate ids for things.\n\n :param nb_goods: the number of things.\n :param contract: the instance of the contract\n :return: a dictionary mapping goods' ids to names.\n \"\"\"\n token_ids = contract.create_token_ids(TOKEN_TYPE, nb_goods) # type: ignore\n token_ids_dict = {\n str(token_id): \"NFT_{}\".format(token_id) for token_id in token_ids\n }\n return token_ids_dict\n\n\ndef determine_scaling_factor(money_endowment: int) -> float:\n \"\"\"\n Compute the scaling factor based on the money amount.\n\n :param money_endowment: the endowment of money for the agent\n :return: the scaling factor\n \"\"\"\n scaling_factor = 10.0 ** (len(str(money_endowment)) - 1)\n return scaling_factor\n\n\ndef generate_good_endowments(\n agent_addresses: List[str],\n good_ids: List[str],\n base_amount: 
int,\n uniform_lower_bound_factor: int,\n uniform_upper_bound_factor: int,\n) -> Dict[str, Dict[str, int]]:\n \"\"\"\n Compute good endowments per agent. That is, a matrix of shape (nb_agents, nb_goods).\n\n :param agent_addresses: the addresses of the agents\n :param good_ids: the list of good ids\n :param base_amount: the base amount of instances per good\n :param uniform_lower_bound_factor: the lower bound of the uniform distribution for the sampling of the good instance number.\n :param uniform_upper_bound_factor: the upper bound of the uniform distribution for the sampling of the good instance number.\n :return: the endowments matrix.\n \"\"\"\n # sample good instances\n nb_agents = len(agent_addresses)\n instances_per_good = _sample_good_instances(\n nb_agents,\n good_ids,\n base_amount,\n uniform_lower_bound_factor,\n uniform_upper_bound_factor,\n )\n # each agent receives at least base amount of each good\n base_assignment = {good_id: base_amount for good_id in good_ids}\n endowments = {agent_addr: base_assignment for agent_addr in agent_addresses}\n # randomly assign additional goods to create differences\n for good_id in good_ids:\n for _ in range(instances_per_good[good_id] - (base_amount * nb_agents)):\n idx = random.randint(0, nb_agents - 1) # nosec\n agent_addr = agent_addresses[idx]\n endowments[agent_addr][good_id] += 1\n return endowments\n\n\ndef generate_utility_params(\n agent_addresses: List[str], good_ids: List[str], scaling_factor: float\n) -> Dict[str, Dict[str, float]]:\n \"\"\"\n Compute the preference matrix. 
That is, a generic element e_ij is the utility of good j for agent i.\n\n :param agent_addresses: the agent addresses\n :param good_ids: the list of good ids\n :param scaling_factor: a scaling factor for all the utility params generated.\n :return: the preference matrix.\n \"\"\"\n decimals = 4 if len(good_ids) < 100 else 8\n utility_function_params = {} # type: Dict[str, Dict[str, float]]\n for agent_addr in agent_addresses:\n random_integers = [\n random.randint(1, 101) for _ in range(len(good_ids)) # nosec\n ]\n total = sum(random_integers)\n normalized_fractions = [\n round(i / float(total), decimals) for i in random_integers\n ]\n if not sum(normalized_fractions) == 1.0:\n normalized_fractions[-1] = round(\n 1.0 - sum(normalized_fractions[0:-1]), decimals\n )\n # scale the utility params\n params = {\n good_id: param * scaling_factor\n for good_id, param in zip(good_ids, normalized_fractions)\n }\n utility_function_params[agent_addr] = params\n\n return utility_function_params\n\n\ndef _sample_good_instances(\n nb_agents: int,\n good_ids: List[str],\n base_amount: int,\n uniform_lower_bound_factor: int,\n uniform_upper_bound_factor: int,\n) -> Dict[str, int]:\n \"\"\"\n Sample the number of instances for a good.\n\n :param nb_agents: the number of agents\n :param good_ids: the good ids\n :param base_amount: the base amount of instances per good\n :param uniform_lower_bound_factor: the lower bound factor of a uniform distribution\n :param uniform_upper_bound_factor: the upper bound factor of a uniform distribution\n :return: the number of instances I sampled.\n \"\"\"\n a = base_amount * nb_agents + nb_agents * uniform_lower_bound_factor\n b = base_amount * nb_agents + nb_agents * uniform_upper_bound_factor\n # Return random integer in range [a, b]\n nb_instances = {good_id: round(np.random.uniform(a, b)) for good_id in good_ids}\n return nb_instances\n\n\ndef generate_money_endowments(\n agent_addresses: List[str], money_endowment: int\n) -> Dict[str, int]:\n 
\"\"\"\n Compute the initial money amounts for each agent.\n\n :param agent_addresses: addresses of the agents.\n :param money_endowment: money endowment per agent.\n :return: the list of initial money amounts.\n \"\"\"\n return {agent_addr: money_endowment for agent_addr in agent_addresses}\n\n\ndef generate_equilibrium_prices_and_holdings(\n endowments: Dict[str, Dict[str, int]],\n utility_function_params: Dict[str, Dict[str, float]],\n money_endowment: Dict[str, int],\n scaling_factor: float,\n quantity_shift: int = QUANTITY_SHIFT,\n) -> Tuple[Dict[str, float], Dict[str, Dict[str, float]], Dict[str, float]]:\n \"\"\"\n Compute the competitive equilibrium prices and allocation.\n\n :param endowments: endowments of the agents\n :param utility_function_params: utility function params of the agents (already scaled)\n :param money_endowment: money endowment per agent.\n :param scaling_factor: a scaling factor for all the utility params generated.\n :param quantity_shift: a factor to shift the quantities in the utility function (to ensure the natural logarithm can be used on the entire range of quantities)\n :return: the lists of equilibrium prices, equilibrium good holdings and equilibrium money holdings\n \"\"\"\n # create ordered lists\n agent_addresses = []\n good_ids = []\n good_ids_to_idx = {}\n endowments_l = []\n utility_function_params_l = []\n money_endowment_l = []\n count = 0\n for agent_addr, endowment in endowments.items():\n agent_addresses.append(agent_addr)\n money_endowment_l.append(money_endowment[agent_addr])\n temp_e = [0] * len(endowment.keys())\n temp_u = [0.0] * len(endowment.keys())\n idx = 0\n for good_id, quantity in endowment.items():\n if count == 0:\n good_ids.append(good_id)\n good_ids_to_idx[good_id] = idx\n idx += 1\n temp_e[good_ids_to_idx[good_id]] = quantity\n temp_u[good_ids_to_idx[good_id]] = utility_function_params[agent_addr][\n good_id\n ]\n count += 1\n endowments_l.append(temp_e)\n utility_function_params_l.append(temp_u)\n\n 
# maths\n endowments_a = np.array(endowments_l, dtype=np.int)\n scaled_utility_function_params_a = np.array(\n utility_function_params_l, dtype=np.float\n ) # note, they are already scaled\n endowments_by_good = np.sum(endowments_a, axis=0)\n scaled_params_by_good = np.sum(scaled_utility_function_params_a, axis=0)\n eq_prices = np.divide(\n scaled_params_by_good, quantity_shift * len(endowments) + endowments_by_good\n )\n eq_good_holdings = (\n np.divide(scaled_utility_function_params_a, eq_prices) - quantity_shift\n )\n eq_money_holdings = (\n np.transpose(np.dot(eq_prices, np.transpose(endowments_a + quantity_shift)))\n + money_endowment_l\n - scaling_factor\n )\n\n # back to dicts\n eq_prices_dict = {\n good_id: cast(float, eq_price)\n for good_id, eq_price in zip(good_ids, eq_prices.tolist())\n }\n eq_good_holdings_dict = {\n agent_addr: {good_id: cast(float, v) for good_id, v in zip(good_ids, egh)}\n for agent_addr, egh in zip(agent_addresses, eq_good_holdings.tolist())\n }\n eq_money_holdings_dict = {\n agent_addr: cast(float, eq_money_holding)\n for agent_addr, eq_money_holding in zip(\n agent_addresses, eq_money_holdings.tolist()\n )\n }\n return eq_prices_dict, eq_good_holdings_dict, eq_money_holdings_dict\n\n\ndef _recover_uid(good_id) -> int:\n \"\"\"\n Get the uid part of the good id.\n\n :param str good_id: the good id\n :return: the uid\n \"\"\"\n uid = int(good_id.split(\"_\")[-2])\n return uid\n",
"# -*- coding: utf-8 -*-\n# ------------------------------------------------------------------------------\n#\n# Copyright 2018-2019 Fetch.AI Limited\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ------------------------------------------------------------------------------\n\n\"\"\"This module contains the strategy class.\"\"\"\nimport threading\nfrom pathlib import Path\nfrom queue import Queue\n\nimport tensorflow as tf\nfrom tensorflow import keras\n\nfrom aea.skills.base import Model\n\nDEFAULT_MODEL_CONFIG_PATH = str(Path(\"..\", \"..\", \"model.config\").resolve())\n\n\nclass MLModel(Model):\n \"\"\"This class defines a machine learning model.\"\"\"\n\n def __init__(self, **kwargs):\n \"\"\"Initialize the machine learning model.\"\"\"\n self._model_config_path = kwargs.pop(\n \"model_config_path\", DEFAULT_MODEL_CONFIG_PATH\n )\n super().__init__(**kwargs)\n\n # TODO this at the moment does not work - need to compile the model according to the network configuration\n # A better alternative is to save/load in HDF5 format, but that might require some system level dependencies\n # https://keras.io/getting-started/faq/#how-can-i-install-hdf5-or-h5py-to-save-my-models-in-keras\n # self._model = keras.Model.from_config(json.load(open(self._model_config_path)))\n self._lock = threading.RLock()\n self._weights = None\n\n self.graph = tf.get_default_graph()\n self.data_queue = Queue()\n self.training_thread = threading.Thread(target=self.training_loop)\n\n 
def setup(self) -> None:\n self.training_thread.start()\n\n def training_loop(self):\n \"\"\"\n Start the training loop.\n\n :return: None\n \"\"\"\n with self.graph.as_default():\n model = self._make_model()\n self._set_weights(model.get_weights())\n while True:\n data = self.data_queue.get()\n if data is None:\n break\n\n X, y, kwargs = data\n model.fit(X, y, **kwargs)\n loss, acc = model.evaluate(X, y, verbose=2)\n self.context.logger.info(\"Loss: {}, Acc: {}\".format(loss, acc))\n self._set_weights(model.get_weights())\n\n def _make_model(self):\n \"\"\"Make the model.\"\"\"\n model = keras.Sequential(\n [\n keras.layers.Flatten(input_shape=(28, 28)),\n keras.layers.Dense(128, activation=\"relu\"),\n keras.layers.Dense(10, activation=\"softmax\"),\n ]\n )\n model.compile(\n optimizer=\"adam\",\n loss=\"sparse_categorical_crossentropy\",\n metrics=[\"accuracy\"],\n )\n return model\n\n def _get_weights(self):\n \"\"\"Get the weights, thread-safe.\"\"\"\n with self._lock:\n return self._weights\n\n def _set_weights(self, weights):\n \"\"\"Set the weights, thread-safe.\"\"\"\n with self._lock:\n self._weights = weights\n\n def predict(self, *args, **kwargs):\n \"\"\"Predict.\"\"\"\n with self._lock:\n with self.graph.as_default():\n model = self._make_model()\n weights = self._get_weights()\n model.set_weights(weights)\n return model.predict(*args, **kwargs)\n\n def evaluate(self, *args, **kwargs):\n \"\"\"Predict.\"\"\"\n with self._lock:\n with self.graph.as_default():\n model = self._make_model()\n weights = self._get_weights()\n model.set_weights(weights)\n return model.evaluate(*args, **kwargs)\n\n def save(self):\n \"\"\"Save the model weights.\"\"\"\n # TODO to implement.\n\n def update(self, X, y, epochs):\n \"\"\"Update the ML model.\"\"\"\n self.data_queue.put((X, y, dict(epochs=epochs)))\n\n def teardown(self) -> None:\n self.data_queue.put(None)\n self.training_thread.join()\n"
] |
[
[
"numpy.transpose",
"numpy.random.uniform",
"numpy.array",
"numpy.sum",
"numpy.divide"
],
[
"tensorflow.keras.layers.Dense",
"tensorflow.get_default_graph",
"tensorflow.keras.layers.Flatten"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
godspeed5/Project_Face
|
[
"f9121e58081736ded918cee768e3ab713844031c"
] |
[
"src/utils/haar_cascade.py"
] |
[
"#!/usr/bin/env python\n\n## @package haar_cascade.py\n#\n# Massimiliano Patacchiola, Plymouth University 2016\n#\n# This module use the opencv haar cascade classifier\n# to find frontal and profile faces in a frame.\n\nimport numpy\nimport cv2\nimport sys\nimport os.path\n\n\nclass haarCascade:\n\n\n def __init__(self, frontalFacePath, profileFacePath):\n\n self.is_face_present = False\n\n #Represent the face type found\n # 1=Frontal, \n # 2=FrontRotLeft, 3=FronRotRight, \n # 4=ProfileLeft, 5=ProfileRight.\n self.face_type = 0\n\n self.face_x = 0\n self.face_y = 0\n self.face_h = 0\n self.face_w = 0\n\n #if(os.path.isfile(frontalFacePath) == False and os.path.isfile(profileFacePath)==False):\n #raise ValueError('haarCascade: the files specified do not exist.')\n\n self._frontalFacePath = frontalFacePath\n self._profileFacePath = profileFacePath\n\n self._frontalCascade = cv2.CascadeClassifier(frontalFacePath)\n self._profileCascade = cv2.CascadeClassifier(profileFacePath)\n\n\n ##\n # Find a face (frontal or profile) in the input image.\n # To find the right profile the input image is vertically flipped,\n # this is done because the training file for profile faces was \n # trained only on left profile.\n # @param inputImg the image where the cascade will be called\n # @param runFrontal if True it looks for frontal faces\n # @param runFrontalRotated if True it looks for frontal rotated faces\n # @param runLeft if True it looks for left profile faces\n # @param runRight if True it looks for right profile faces\n # @param frontalScaleFactor=1.1\n # @param rotatedFrontalScaleFactor=1.1\n # @param leftScaleFactor=1.1\n # @param rightScaleFactor=1.1\n # @param minSizeX=30\n # @param minSizeX=30\n # @param rotationAngleCCW (positive) angle for rotated face detector\n # @param rotationAngleCW (negative) angle for rotated face detector\n # @param lastFaceType to speed up the chain of classifier it is\n # possible to specify the first classifier to execute.\n #\n # Return 
code: 1=Frontal, 2=FrontRotLeft, 3=FronRotRight,\n # 4=ProfileLeft, 5=ProfileRight.\n def findFace(self, inputImg, \n runFrontal=True, runFrontalRotated=True, \n runLeft=True, runRight=True, \n frontalScaleFactor=1.1, rotatedFrontalScaleFactor=1.1, \n leftScaleFactor=1.1, rightScaleFactor=1.1,\n minSizeX=30, minSizeY=30, rotationAngleCCW=30, rotationAngleCW=-30, lastFaceType=0):\n\n #To speed up the chain we start it\n # from the last face-type found\n order = list()\n if(lastFaceType == 0 or lastFaceType==1): order = (1, 2, 3, 4, 5) \n if(lastFaceType == 2): order = (2, 1, 3, 4, 5)\n if(lastFaceType == 3): order = (3, 1, 2, 4, 5) \n if(lastFaceType == 4): order = (4, 1, 2, 3, 5)\n if(lastFaceType == 5): order = (5, 1, 2, 3, 4)\n\n\n for position in order:\n\n #Cascade: frontal faces\n if(runFrontal==True and position==1):\n self._findFrontalFace(inputImg, frontalScaleFactor, minSizeX, minSizeY)\n if(self.is_face_present == True):\n self.face_type = 1\n return (self.face_x, self.face_y, self.face_w, self.face_h)\n\n #Cascade: frontal faces rotated (Left)\n if(runFrontalRotated==True and position==2):\n rows, cols = numpy.shape(inputImg)\n M = cv2.getRotationMatrix2D((cols/2,rows/2),rotationAngleCCW,1) #30 degrees ccw rotation\n inputImgRot = cv2.warpAffine(inputImg, M, (cols,rows))\n self._findFrontalFace(inputImgRot, rotatedFrontalScaleFactor, minSizeX, minSizeY)\n if(self.is_face_present == True):\n self.face_type = 2\n return (self.face_x, self.face_y, self.face_w, self.face_h)\n\n #Cascade: frontal faces rotated (Right)\n if(runFrontalRotated==True and position==3):\n rows, cols = numpy.shape(inputImg)\n M = cv2.getRotationMatrix2D((cols/2,rows/2),rotationAngleCW,1) #30 degrees cw rotation\n inputImgRot = cv2.warpAffine(inputImg, M, (cols,rows))\n self._findFrontalFace(inputImgRot, rotatedFrontalScaleFactor, minSizeX, minSizeY)\n if(self.is_face_present == True):\n self.face_type = 3\n return (self.face_x, self.face_y, self.face_w, self.face_h)\n \n #Cascade: 
left profiles\n if(runLeft==True and position==4):\n self._findProfileFace(inputImg, leftScaleFactor, minSizeX, minSizeY)\n if(self.is_face_present == True):\n self.face_type = 4\n return (self.face_x, self.face_y, self.face_w, self.face_h)\n\n #Cascade: right profiles\n if(runRight==True and position==5):\n flipped_inputImg = cv2.flip(inputImg,1) \n self._findProfileFace(flipped_inputImg, rightScaleFactor, minSizeX, minSizeY)\n if(self.is_face_present == True):\n self.face_type = 5\n f_w, f_h = flipped_inputImg.shape[::-1] #finding the max dimensions\n self.face_x = f_w - (self.face_x + self.face_w) #reshape the x to unfold the mirroring\n return (self.face_x, self.face_y, self.face_w, self.face_h)\n\n\n #It returns zeros if nothing is found\n self.face_type = 0 \n self.is_face_present = False \n return (0, 0, 0, 0)\n\n\n ##\n # Find a frontal face in the input image\n # @param inputImg the image where the cascade will be called\n #\n def _findFrontalFace(self, inputImg, scaleFactor=1.1, minSizeX=30, minSizeY=30, minNeighbors=4):\n\n #Cascade: frontal faces\n faces = self._frontalCascade.detectMultiScale(\n inputImg,\n scaleFactor=scaleFactor,\n minNeighbors=minNeighbors,\n minSize=(minSizeX, minSizeY),\n flags=cv2.CASCADE_SCALE_IMAGE\n\n )\n\n if(len(faces) == 0):\n self.face_x = 0\n self.face_y = 0\n self.face_w = 0\n self.face_h = 0\n self.is_face_present = False\n return (0, 0, 0, 0)\n\n if(len(faces) == 1): \n self.face_x = faces[0][0]\n self.face_y = faces[0][1]\n self.face_w = faces[0][2]\n self.face_h = faces[0][3]\n self.is_face_present = True\n return (faces[0][0], faces[0][1], faces[0][2], faces[0][3])\n\n #If there are more than 1 face\n # it returns the position of\n # the one with the bigger area.\n if(len(faces) > 1):\n area_list = list() \n for x,y,h,w in faces:\n area_list.append(w*h)\n max_index = area_list.index(max(area_list)) #return the index of max element\n self.face_x = faces[max_index][0]\n self.face_y = faces[max_index][1]\n self.face_w 
= faces[max_index][2]\n self.face_h = faces[max_index][3]\n self.is_face_present = True\n return (faces[max_index][0], faces[max_index][1], faces[max_index][2], faces[max_index][3]) \n\n ##\n # Find a profile face in the input image\n # @param inputImg the image where the cascade will be called\n # \n def _findProfileFace(self, inputImg, scaleFactor=1.1, minSizeX=30, minSizeY=30, minNeighbors=4):\n\n #Cascade: left profile\n faces = self._profileCascade.detectMultiScale(\n inputImg,\n scaleFactor=scaleFactor,\n minNeighbors=minNeighbors,\n minSize=(minSizeX, minSizeY),\n flags=cv2.CASCADE_SCALE_IMAGE\n )\n\n if(len(faces) == 0):\n self.face_x = 0\n self.face_y = 0\n self.face_w = 0\n self.face_h = 0\n self.is_face_present = False\n return (0, 0, 0, 0)\n\n if(len(faces) == 1): \n self.face_x = faces[0][0]\n self.face_y = faces[0][1]\n self.face_w = faces[0][2]\n self.face_h = faces[0][3]\n self.is_face_present = True\n return (faces[0][0], faces[0][1], faces[0][2], faces[0][3])\n\n #If there are more than 1 face\n # it returns the position of\n # the one with the bigger area.\n if(len(faces) > 1):\n area_list = list() \n for x,y,h,w in faces:\n area_list.append(w*h)\n max_index = area_list.index(max(area_list)) #return the index of max element\n self.face_x = faces[max_index][0]\n self.face_y = faces[max_index][1]\n self.face_w = faces[max_index][2]\n self.face_h = faces[max_index][3]\n self.is_face_present = True\n return (faces[max_index][0], faces[max_index][1], faces[max_index][2], faces[max_index][3]) \n\n\n\n\n\n"
] |
[
[
"numpy.shape"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
gargrohin/sngan.pytorch
|
[
"58d200c731935360f1b0fdcb1865c366c633e56c"
] |
[
"functions.py"
] |
[
"# -*- coding: utf-8 -*-\n# @Date : 2019-07-25\n# @Author : Xinyu Gong ([email protected])\n# @Link : None\n# @Version : 0.0\n\nimport comet_ml\nimport os\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom torchvision.utils import make_grid\nimport torch.nn.functional as F\nfrom imageio import imsave\nfrom tqdm import tqdm\nfrom copy import deepcopy\nimport logging\nfrom torch.autograd import Variable\n\nfrom utils.inception_score import get_inception_score\nfrom utils.fid_score import calculate_fid_given_paths\nimport models\n\nclass Log_loss(torch.nn.Module):\n def __init__(self):\n # negation is true when you minimize -log(val)\n super(Log_loss, self).__init__()\n \n def forward(self, x, negation=True):\n # shape of x will be [batch size]\n log_val = torch.log(x)\n loss = torch.sum(log_val)\n if negation:\n loss = torch.neg(loss)\n return loss\n \nclass Itself_loss(torch.nn.Module):\n def __init__(self):\n super(Itself_loss, self).__init__()\n \n def forward(self, x, negation=True):\n # shape of x will be [batch size]\n loss = torch.sum(x)\n if negation:\n loss = torch.neg(loss)\n return loss\n\n\nlogger = logging.getLogger(__name__)\n\ndef train_d2(args, gen_net: nn.Module, dis_net1: nn.Module, dis_net2: nn.Module, gen_optimizer, dis_optimizer1, dis_optimizer2, gen_avg_param, train_loader, epoch,\n writer_dict, schedulers=None, experiment=None):\n writer = writer_dict['writer']\n gen_step = 0\n\n criterion_log = Log_loss()\n criterion_itself = Itself_loss()\n\n # train mode\n gen_net = gen_net.train()\n dis_net1 = dis_net1.train()\n dis_net2 = dis_net2.train()\n\n d_loss1 = 0.0\n d_loss2 = 0.0\n g_loss = 0.0\n\n for iter_idx, (imgs, _) in enumerate(tqdm(train_loader)):\n global_steps = writer_dict['train_global_steps']\n\n # Adversarial ground truths\n real_imgs = imgs.type(torch.cuda.FloatTensor)\n\n # Sample noise as generator input\n z = torch.cuda.FloatTensor(np.random.normal(0, 1, (imgs.shape[0], args.latent_dim)))\n\n # ---------------------\n 
# Train Discriminator\n # ---------------------\n dis_optimizer1.zero_grad()\n dis_optimizer2.zero_grad()\n\n real_validity1 = dis_net1(real_imgs)\n real_validity2 = dis_net2(real_imgs)\n fake_imgs = gen_net(z).detach()\n assert fake_imgs.size() == real_imgs.size()\n\n fake_validity1 = dis_net1(fake_imgs.detach())\n fake_validity2 = dis_net2(fake_imgs.detach())\n\n d_loss1 = 0.2 * criterion_log(real_validity1) + criterion_itself(fake_validity1, False)\n d_loss1.backward()\n\n d_loss2 = criterion_itself(real_validity2, False) + 0.1*criterion_log(fake_validity2, False)\n d_loss2.backward()\n\n dis_optimizer1.step()\n dis_optimizer2.step()\n\n writer.add_scalar('d_loss1', d_loss1.item(), global_steps)\n writer.add_scalar('d_loss2', d_loss2.item(), global_steps)\n\n # -----------------\n # Train Generator\n # -----------------\n if global_steps % args.n_critic == 0:\n gen_optimizer.zero_grad()\n\n gen_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.gen_batch_size, args.latent_dim)))\n gen_imgs = gen_net(gen_z)\n fake_validity1 = dis_net1(gen_imgs)\n fake_validity2 = dis_net2(gen_imgs)\n\n # cal loss\n g_loss = criterion_itself(fake_validity1) + 0.1*criterion_log(fake_validity2)\n g_loss.backward()\n gen_optimizer.step()\n\n # adjust learning rate\n # if schedulers:\n # gen_scheduler, dis_scheduler = schedulers\n # g_lr = gen_scheduler.step(global_steps)\n # d_lr = dis_scheduler.step(global_steps)\n # writer.add_scalar('LR/g_lr', g_lr, global_steps)\n # writer.add_scalar('LR/d_lr', d_lr, global_steps)\n\n # moving average weight\n for p, avg_p in zip(gen_net.parameters(), gen_avg_param):\n avg_p.mul_(0.999).add_(0.001, p.data)\n\n writer.add_scalar('g_loss', g_loss.item(), global_steps)\n gen_step += 1\n\n # verbose\n if gen_step and iter_idx % args.print_freq == 0:\n tqdm.write(\n \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]\" %\n (epoch, args.max_epoch, iter_idx % len(train_loader), len(train_loader), d_loss1.item(), g_loss.item()))\n if experiment 
!= None:\n experiment.log_metric(\"gen_loss\", g_loss.item())\n experiment.log_metric(\"dis_loss1\", d_loss1.item())\n experiment.log_metric(\"dis_loss2\", d_loss2.item())\n\n writer_dict['train_global_steps'] = global_steps + 1\n\n\ndef train_chainer(args, gen_net: nn.Module, dis_net: nn.Module, gen_optimizer, dis_optimizer, gen_avg_param, train_loader, epoch,\n writer_dict, schedulers=None, experiment=None):\n writer = writer_dict['writer']\n gen_step = 0\n\n # train mode\n gen_net = gen_net.train()\n dis_net = dis_net.train()\n\n d_loss = 0.0\n g_loss = 0.0\n\n for iter_idx, (imgs, _) in enumerate(tqdm(train_loader)):\n global_steps = writer_dict['train_global_steps']\n\n # Adversarial ground truths\n real_imgs = imgs.type(torch.cuda.FloatTensor)\n\n # Sample noise as generator input\n z = torch.cuda.FloatTensor(np.random.normal(0, 1, (imgs.shape[0], args.latent_dim)))\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n dis_optimizer.zero_grad()\n\n real_validity = dis_net(real_imgs)\n fake_imgs = gen_net(z).detach()\n assert fake_imgs.size() == real_imgs.size()\n\n fake_validity = dis_net(fake_imgs)\n\n # cal loss\n d_loss = torch.mean(F.softplus(-real_validity)) + \\\n torch.mean(F.softplus(fake_validity))\n d_loss.backward()\n dis_optimizer.step()\n\n writer.add_scalar('d_loss', d_loss.item(), global_steps)\n\n # -----------------\n # Train Generator\n # -----------------\n if global_steps % args.n_critic == 0:\n gen_optimizer.zero_grad()\n\n gen_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.gen_batch_size, args.latent_dim)))\n gen_imgs = gen_net(gen_z)\n fake_validity = dis_net(gen_imgs)\n\n # cal loss\n g_loss = torch.mean(F.softplus(-fake_validity))\n g_loss.backward()\n gen_optimizer.step()\n\n # adjust learning rate\n if schedulers:\n print(\"schedulars?\")\n gen_scheduler, dis_scheduler = schedulers\n g_lr = gen_scheduler.step(global_steps)\n d_lr = dis_scheduler.step(global_steps)\n writer.add_scalar('LR/g_lr', 
g_lr, global_steps)\n writer.add_scalar('LR/d_lr', d_lr, global_steps)\n\n # moving average weight\n # for p, avg_p in zip(gen_net.parameters(), gen_avg_param):\n # avg_p.mul_(0.999).add_(0.001, p.data)\n\n writer.add_scalar('g_loss', g_loss.item(), global_steps)\n gen_step += 1\n\n # verbose\n if gen_step and iter_idx % args.print_freq == 0:\n tqdm.write(\n \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]\" %\n (epoch, args.max_epoch, iter_idx % len(train_loader), len(train_loader), d_loss.item(), g_loss.item()))\n if experiment != None:\n experiment.log_metric(\"gen_loss\", g_loss.item())\n experiment.log_metric(\"dis_loss\", d_loss.item())\n\n writer_dict['train_global_steps'] = global_steps + 1\n\ndef train_wgan(args, gen_net: nn.Module, multiD, gen_optimizer, multiD_opt, gen_avg_param, train_loader, epoch,\n writer_dict, schedulers=None, experiment=None):\n writer = writer_dict['writer']\n gen_step = 0\n\n n_dis = len(multiD)\n\n # train mode\n gen_net = gen_net.train()\n\n d_loss = 0.0\n g_loss = 0.0\n\n for iter_idx, (imgs, _) in enumerate(tqdm(train_loader)):\n global_steps = writer_dict['train_global_steps']\n\n for i in range(n_dis):\n multiD[i].train()\n for p in multiD[i].parameters():\n p.requires_grad = True\n multiD_opt[i].zero_grad()\n \n for p in gen_net.parameters():\n p.requires_grad = True\n\n # Adversarial ground truths\n x_real = imgs.type(torch.cuda.FloatTensor)\n y_real = torch.ones(imgs.shape[0], 1).cuda()\n\n # Sample noise as generator input\n z = torch.cuda.FloatTensor(np.random.normal(0, 1, (imgs.shape[0], args.latent_dim)))\n y_fake = torch.zeros(x_real.size()[0], 1).cuda()\n # ---------------------\n # Train Discriminator\n # ---------------------\n for i in range(n_dis):\n multiD_opt[i].zero_grad()\n \n gen_optimizer.zero_grad()\n x_fake = gen_net(z)\n\n # assert x_fake.size() == x_real.size()\n\n flag = True\n for i in range(n_dis):\n if flag:\n D_fake = multiD[i](x_fake)\n D_real = multiD[i](x_real)\n flag = False\n else:\n 
D_fake = torch.cat((D_fake, multiD[i](x_fake)), dim = 1)\n D_real = torch.cat((D_real, multiD[i](x_real)), dim = 1)\n \n ind = torch.argmin(D_fake, dim = 1)\n mask = torch.zeros((x_real.size()[0], n_dis)).cuda()\n mask2 = torch.zeros((x_real.size()[0], n_dis)).cuda()\n\n for i in range(mask.size()[0]):\n random_checker = np.random.randint(0,10)\n if random_checker > 7: #100 for no random thingie\n index = np.random.randint(0,n_dis)\n mask[i][index] = 1.0\n mask2[i][index] = 1.0\n else:\n mask[i][ind[i]] = 1.0\n mask2[i][ind[i]] = 1.0\n \n for i in range(mask.size()[0], mask2.size()[0]):\n mask2[i][np.random.randint(0,n_dis)] = 1.0\n alpha = Variable(torch.rand(x_real.size()))\n alpha = alpha.cuda()\n x_hat = alpha*x_fake + (1-alpha)*x_real\n flag = True\n for i in range(n_dis):\n if flag:\n d_x_hat = multiD[i](x_hat)\n flag = False\n else:\n d_x_hat = torch.cat((d_x_hat, multiD[i](x_hat)), dim = 1)\n d_x_hat = torch.sum(mask*d_x_hat, dim=1)\n # d_x_hat = multiD[0](x_hat)\n gradients = torch.autograd.grad(outputs=d_x_hat, inputs=x_hat,\n grad_outputs=torch.ones(d_x_hat.size()).cuda(),\n create_graph=True, retain_graph=True, only_inputs=True)[0]\n\n gradients = gradients.reshape(imgs.shape[0], -1)\n gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() \n LAMBDA=10\n loss = LAMBDA*gradient_penalty\n D_fake_output = torch.sum(mask2*D_fake, dim = 1)\n D_real_output = torch.sum(mask*D_real, dim = 1)\n \n # cal loss\n d_loss = -(torch.mean(D_real_output) - torch.mean(D_fake_output))\n d_loss += loss\n # d_loss = criterion(real_validity, y_real) + criterion(fake_validity, y_fake)\n d_loss.backward()\n for i in range(n_dis):\n multiD_opt[i].step()\n \n # for i in range(n_dis):\n # for p in multiD[i].parameters():\n # p.data.clamp_(-0.01, 0.01)\n\n writer.add_scalar('d_loss', d_loss.item(), global_steps)\n\n # -----------------\n # Train Generator\n # -----------------\n if global_steps % args.n_critic == 0:\n gen_optimizer.zero_grad()\n\n gen_z = 
torch.cuda.FloatTensor(np.random.normal(0, 1, (args.gen_batch_size, args.latent_dim)))\n fake_img = gen_net(gen_z)\n\n critic_fakes = []\n lit = np.zeros(n_dis)\n for i in range(n_dis):\n for p in multiD[i].parameters():\n p.requires_grad = False\n critic_fake = multiD[i](fake_img)\n critic_fakes.append(critic_fake)\n lit[i] = torch.sum(critic_fake).item()\n loss_sort = np.argsort(lit)\n weights = np.random.dirichlet(np.ones(n_dis))\n weights = np.sort(weights)[::-1]\n\n flag = False\n for i in range(len(critic_fakes)):\n if flag == False:\n critic_fake = weights[i]*critic_fakes[loss_sort[i]]\n flag = True\n else:\n critic_fake = torch.add(critic_fake, weights[i]*critic_fakes[loss_sort[i]])\n\n # cal loss\n g_loss = -torch.mean(critic_fake)\n # g_loss = criterion(fake_validity, y_fake)\n g_loss.backward()\n gen_optimizer.step()\n\n # adjust learning rate\n if schedulers:\n gen_scheduler, dis_scheduler = schedulers\n g_lr = gen_scheduler.step(global_steps)\n d_lr = dis_scheduler.step(global_steps)\n writer.add_scalar('LR/g_lr', g_lr, global_steps)\n writer.add_scalar('LR/d_lr', d_lr, global_steps)\n\n # moving average weight\n for p, avg_p in zip(gen_net.parameters(), gen_avg_param):\n avg_p.mul_(0.999).add_(0.001, p.data)\n\n writer.add_scalar('g_loss', g_loss.item(), global_steps)\n gen_step += 1\n\n # verbose\n if gen_step and iter_idx % args.print_freq == 0:\n tqdm.write(\n \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]\" %\n (epoch, args.max_epoch, iter_idx % len(train_loader), len(train_loader), d_loss.item(), g_loss.item()))\n if experiment != None:\n experiment.log_metric(\"gen_loss\", g_loss.item())\n experiment.log_metric(\"dis_loss\", d_loss.item())\n\n writer_dict['train_global_steps'] = global_steps + 1\n\n# def train_multi(args, gen_net: nn.Module, multiD, gen_optimizer, multiD_opt, gen_avg_param, train_loader, epoch,\n# writer_dict, schedulers=None, experiment=None):\n# writer = writer_dict['writer']\n# gen_step = 0\n\n# criterion = 
nn.BCELoss()\n# n_dis = len(multiD)\n\n# # train mode\n# gen_net = gen_net.train()\n\n# d_loss = 0.0\n# g_loss = 0.0\n\n# for iter_idx, (imgs, _) in enumerate(tqdm(train_loader)):\n# global_steps = writer_dict['train_global_steps']\n\n# for i in range(n_dis):\n# multiD[i].train()\n# for p in multiD[i].parameters():\n# p.requires_grad = True\n# multiD_opt[i].zero_grad()\n \n# for p in gen_net.parameters():\n# p.requires_grad = True\n\n# # Adversarial ground truths\n# x_real = imgs.type(torch.cuda.FloatTensor)\n# y_real = torch.ones(imgs.shape[0], 1).cuda()\n\n# # Sample noise as generator input\n# z = torch.cuda.FloatTensor(np.random.normal(0, 1, (imgs.shape[0], args.latent_dim)))\n# y_fake = torch.zeros(x_real.size()[0], 1).cuda()\n# # ---------------------\n# # Train Discriminator\n# # ---------------------\n# for i in range(n_dis):\n# multiD_opt[i].zero_grad()\n \n# gen_optimizer.zero_grad()\n# x_fake = gen_net(z).detach()\n\n# # assert x_fake.size() == x_real.size()\n\n# flag = True\n# for i in range(n_dis):\n# if flag:\n# D_fake = multiD[i](x_fake)\n# D_real = multiD[i](x_real)\n# flag = False\n# else:\n# D_fake = torch.cat((D_fake, multiD[i](x_fake)), dim = 1)\n# D_real = torch.cat((D_real, multiD[i](x_real)), dim = 1)\n \n# ind = torch.argmin(D_fake, dim = 1)\n# mask = torch.zeros((x_real.size()[0], n_dis)).cuda()\n# mask2 = torch.zeros((x_fake.size()[0], n_dis)).cuda()\n\n# for i in range(mask2.size()[0]):\n# random_checker = np.random.randint(0,10)\n# if random_checker > 7: #100 for no random thingie\n# index = np.random.randint(0,n_dis)\n# mask[i][index] = 1.0\n# mask2[i][index] = 1.0\n# else:\n# mask[i][ind[i]] = 1.0\n# mask2[i][ind[i]] = 1.0\n \n# # for i in range(mask2.size()[0], mask.size()[0]):\n# # id = np.random.randint(0,n_dis)\n# # if id != ind[i - mask2.size()[0]]:\n# # mask[i][id] = 1.0\n \n# D_fake_output = torch.sum(mask2*D_fake, dim = 1)\n# D_real_output = torch.sum(mask*D_real, dim = 1)\n\n# #cos = torch.nn.CosineSimilarity()\n# #dot = 
cos(D_fake[0], D_fake[1])\n \n# # cal loss\n# d_loss = torch.mean(nn.ReLU(inplace=True)(1.0 - D_real_output)) + \\\n# torch.mean(nn.ReLU(inplace=True)(1 + D_fake_output))\n# # d_loss = criterion(real_validity, y_real) + criterion(fake_validity, y_fake)\n# d_loss.backward()\n# for i in range(n_dis):\n# multiD_opt[i].step()\n\n# writer.add_scalar('d_loss', d_loss.item(), global_steps)\n\n# # -----------------\n# # Train Generator\n# # -----------------\n# if global_steps % args.n_critic == 0:\n# gen_optimizer.zero_grad()\n\n# gen_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.gen_batch_size, args.latent_dim)))\n# fake_img = gen_net(gen_z)\n\n# critic_fakes = []\n# lit = np.zeros(n_dis)\n# for i in range(n_dis):\n# for p in multiD[i].parameters():\n# p.requires_grad = False\n# critic_fake = multiD[i](fake_img)\n# critic_fakes.append(critic_fake)\n# lit[i] = torch.sum(critic_fake).item()\n# loss_sort = np.argsort(lit)\n# weights = np.random.dirichlet(np.ones(n_dis))\n# weights = np.sort(weights)[::-1]\n\n# flag = False\n# for i in range(len(critic_fakes)):\n# if flag == False:\n# critic_fake = weights[i]*critic_fakes[loss_sort[i]]\n# flag = True\n# else:\n# critic_fake = torch.add(critic_fake, weights[i]*critic_fakes[loss_sort[i]])\n\n# # cal loss\n# g_loss = -torch.mean(critic_fake)\n# # g_loss = criterion(fake_validity, y_fake)\n# g_loss.backward()\n# gen_optimizer.step()\n\n# # adjust learning rate\n# if schedulers:\n# gen_scheduler, dis_scheduler = schedulers\n# g_lr = gen_scheduler.step(global_steps)\n# d_lr = dis_scheduler.step(global_steps)\n# writer.add_scalar('LR/g_lr', g_lr, global_steps)\n# writer.add_scalar('LR/d_lr', d_lr, global_steps)\n\n# # moving average weight\n# for p, avg_p in zip(gen_net.parameters(), gen_avg_param):\n# avg_p.mul_(0.999).add_(0.001, p.data)\n\n# writer.add_scalar('g_loss', g_loss.item(), global_steps)\n# gen_step += 1\n\n# # verbose\n# if gen_step and iter_idx % args.print_freq == 0:\n# tqdm.write(\n# \"[Epoch %d/%d] 
[Batch %d/%d] [D loss: %f] [G loss: %f]\" %\n# (epoch, args.max_epoch, iter_idx % len(train_loader), len(train_loader), d_loss.item(), g_loss.item()))\n# if experiment != None:\n# experiment.log_metric(\"gen_loss\", g_loss.item())\n# experiment.log_metric(\"dis_loss\", d_loss.item())\n\n# writer_dict['train_global_steps'] = global_steps + 1\n\n\n\n\ndef train_multi(args, gen_net: nn.Module, multiD, gen_optimizer, multiD_opt, gen_avg_param, train_loader, epoch,\n writer_dict, alpha_m, t, check_ep, alpha, schedulers=None, experiment=None):\n writer = writer_dict['writer']\n gen_step = 0\n n_dis = len(multiD)\n def weights_init(m):\n classname = m.__class__.__name__\n if classname.find('Conv2d') != -1:\n if args.init_type == 'normal':\n nn.init.normal_(m.weight.data, 0.0, 0.02)\n elif args.init_type == 'orth':\n nn.init.orthogonal_(m.weight.data)\n elif args.init_type == 'xavier_uniform':\n nn.init.xavier_uniform(m.weight.data, 1.)\n else:\n raise NotImplementedError('{} unknown inital type'.format(args.init_type))\n elif classname.find('BatchNorm2d') != -1:\n nn.init.normal_(m.weight.data, 1.0, 0.02)\n nn.init.constant_(m.bias.data, 0.0)\n for imgs,_ in train_loader:\n exemplar = imgs[:15].type(torch.cuda.FloatTensor)\n break\n addno = False\n # check_ep = 10\n # check_ep = int(check_ep*t)\n if n_dis == 1:\n check_ep = 5\n if epoch > 1 and epoch % check_ep == 0:\n check_ep = int(check_ep*t)\n exemplar_flag = True\n with torch.no_grad():\n for dis_index in range(n_dis):\n if exemplar_flag:\n exemplar_res = multiD[dis_index](exemplar).unsqueeze(0)\n exemplar_flag = False\n else:\n exemplar_res = torch.cat((multiD[dis_index](exemplar).unsqueeze(0), exemplar_res), dim=0)\n print(exemplar_res.size())\n alpha = 1.5\n if n_dis > 2:\n alpha = alpha*alpha_m\n print('\\n',exemplar_res, torch.mean(exemplar_res, dim = 1))\n exemplar_max,_ = torch.max(exemplar_res, dim = 1)\n exemplar_min,_ = torch.min(exemplar_res, dim = 1)\n print('\\n',exemplar_min)\n # for i in 
range(n_dis):\n # if exemplar_min[i].item() > alpha[0]*torch.mean(exemplar_res[i]).item():\n # addno = True\n # print(exemplar_min[i].item(), torch.mean(exemplar_res[i]).item())\n # if n_dis > 3:\n # addno = False\n # \"\\nAdd True but N_dis > 4\\n\"\n # break\n # break\n for i in range(n_dis):\n if addno:\n break\n if exemplar_max[i].item() > alpha*torch.mean(exemplar_res[i]).item():\n addno = True\n print(exemplar_min[i].item(), torch.mean(exemplar_res[i]).item())\n # if n_dis > 3:\n # addno = False\n # \"\\nAdd True but N_dis > 4\\n\"\n # break\n break\n if addno:\n # print('\\n adding D \\n')\n addno = False\n d_new = eval('models.'+args.model+'.Discriminator')(args=args).cuda()\n# d_new.apply(weights_init)\n multiD.append(d_new)\n multiD_opt.append(torch.optim.Adam(filter(lambda p: p.requires_grad, multiD[n_dis].parameters()),\n args.d_lr, (args.beta1, args.beta2)))\n n_dis +=1\n # print('\\nn_dis: ', n_dis)\n # dcopy = deepcopy(multiD[n_dis-1]).cpu()\n # sdict = dcopy.state_dict()\n # for i, p in enumerate(sdict):\n # if i <4:\n # continue\n # # print(p)\n # sdict[p] = 0.01*torch.randn(sdict[p].size())\n # dcopy.load_state_dict(sdict)\n # multiD.append(dcopy.cuda())\n # sdict = multiD[n_dis-1].state_dict()\n # for i, p in enumerate(sdict):\n # # if i <4:\n # # continue\n # # print(p)\n # sdict[p] = sdict[p] + 0.1*torch.randn(sdict[p].size()).cuda()\n # multiD[n_dis-1].load_state_dict(sdict)\n # multiD_opt.append(torch.optim.Adam(multiD[n_dis].parameters(), lr = args.lr, betas = (0.5,0.999)))\n # n_dis = n_dis + 1\n # train mode\n gen_net = gen_net.train()\n d_loss = 0.0\n g_loss = 0.0\n for iter_idx, (imgs, _) in enumerate(tqdm(train_loader)):\n global_steps = writer_dict['train_global_steps']\n for i in range(n_dis):\n multiD[i].train()\n for p in multiD[i].parameters():\n p.requires_grad = True\n multiD_opt[i].zero_grad()\n for p in gen_net.parameters():\n p.requires_grad = True\n # Adversarial ground truths\n x_real = imgs.type(torch.cuda.FloatTensor)\n 
y_real = torch.ones(imgs.shape[0], 1).cuda()\n # Sample noise as generator input\n z = torch.cuda.FloatTensor(np.random.normal(0, 1, (imgs.shape[0], args.latent_dim)))\n y_fake = torch.zeros(x_real.size()[0], 1).cuda()\n # ---------------------\n # Train Discriminator\n # ---------------------\n for i in range(n_dis):\n multiD_opt[i].zero_grad()\n gen_optimizer.zero_grad()\n x_fake = gen_net(z).detach()\n # assert x_fake.size() == x_real.size()\n flag = True\n for i in range(n_dis):\n if flag:\n D_fake = multiD[i](x_fake)\n D_real = multiD[i](x_real)\n flag = False\n else:\n D_fake = torch.cat((D_fake, multiD[i](x_fake)), dim = 1)\n D_real = torch.cat((D_real, multiD[i](x_real)), dim = 1)\n ind = torch.argmin(D_fake, dim = 1)\n mask = torch.zeros((x_real.size()[0], n_dis)).cuda()\n mask2 = torch.zeros((x_real.size()[0], n_dis)).cuda()\n for i in range(mask.size()[0]):\n random_checker = np.random.randint(0,10)\n if random_checker > 7: #100 for no random thingie\n index = np.random.randint(0,n_dis)\n mask[i][index] = 1.0\n mask2[i][index] = 1.0\n else:\n mask[i][ind[i]] = 1.0\n mask2[i][ind[i]] = 1.0\n # for i in range(mask.size()[0], mask2.size()[0]):\n # mask2[i][np.random.randint(0,n_dis)] = 1.0\n D_fake_output = torch.sum(mask2*D_fake, dim = 1)\n D_real_output = torch.sum(mask*D_real, dim = 1)\n # cal loss\n d_loss = torch.mean(nn.ReLU(inplace=True)(1.0 - D_real_output)) + \\\n torch.mean(nn.ReLU(inplace=True)(1 + D_fake_output))\n # d_loss = criterion(real_validity, y_real) + criterion(fake_validity, y_fake)\n d_loss.backward()\n for i in range(n_dis):\n multiD_opt[i].step()\n writer.add_scalar('d_loss', d_loss.item(), global_steps)\n # -----------------\n # Train Generator\n # -----------------\n if global_steps % args.n_critic == 0:\n gen_optimizer.zero_grad()\n gen_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.gen_batch_size, args.latent_dim)))\n fake_img = gen_net(gen_z)\n critic_fakes = []\n lit = np.zeros(n_dis)\n for i in range(n_dis):\n for p 
in multiD[i].parameters():\n p.requires_grad = False\n critic_fake = multiD[i](fake_img)\n critic_fakes.append(critic_fake)\n lit[i] = torch.sum(critic_fake).item()\n loss_sort = np.argsort(lit)\n weights = np.random.dirichlet(np.ones(n_dis))\n weights = np.sort(weights)[::-1]\n flag = False\n for i in range(len(critic_fakes)):\n if flag == False:\n critic_fake = weights[i]*critic_fakes[loss_sort[i]]\n flag = True\n else:\n critic_fake = torch.add(critic_fake, weights[i]*critic_fakes[loss_sort[i]])\n # cal loss\n g_loss = -torch.mean(critic_fake)\n # g_loss = criterion(fake_validity, y_fake)\n g_loss.backward()\n gen_optimizer.step()\n # adjust learning rate\n if schedulers:\n gen_scheduler, dis_scheduler = schedulers\n g_lr = gen_scheduler.step(global_steps)\n d_lr = dis_scheduler.step(global_steps)\n writer.add_scalar('LR/g_lr', g_lr, global_steps)\n writer.add_scalar('LR/d_lr', d_lr, global_steps)\n # moving average weight\n for p, avg_p in zip(gen_net.parameters(), gen_avg_param):\n avg_p.mul_(0.999).add_(0.001, p.data)\n writer.add_scalar('g_loss', g_loss.item(), global_steps)\n gen_step += 1\n # verbose\n if gen_step and iter_idx % args.print_freq == 0:\n tqdm.write(\n \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]\" %\n (epoch, args.max_epoch, iter_idx % len(train_loader), len(train_loader), d_loss.item(), g_loss.item()))\n if experiment != None:\n experiment.log_metric(\"gen_loss\", g_loss.item())\n experiment.log_metric(\"dis_loss\", d_loss.item())\n writer_dict['train_global_steps'] = global_steps + 1\n return multiD, multiD_opt, check_ep, alpha\n\n\ndef train(args, gen_net: nn.Module, dis_net: nn.Module, gen_optimizer, dis_optimizer, gen_avg_param, train_loader, epoch,\n writer_dict, schedulers=None, experiment=None):\n writer = writer_dict['writer']\n gen_step = 0\n\n criterion = nn.BCELoss() \n\n # train mode\n gen_net = gen_net.train()\n dis_net = dis_net.train()\n\n d_loss = 0.0\n g_loss = 0.0\n\n for iter_idx, (imgs, _) in 
enumerate(tqdm(train_loader)):\n global_steps = writer_dict['train_global_steps']\n\n # Adversarial ground truths\n real_imgs = imgs.type(torch.cuda.FloatTensor)\n y_real = torch.ones(imgs.shape[0], 1).cuda()\n\n # Sample noise as generator input\n z = torch.cuda.FloatTensor(np.random.normal(0, 1, (imgs.shape[0], args.latent_dim)))\n y_fake = torch.zeros(real_imgs.size()[0], 1).cuda()\n\n # ---------------------\n # Train Discriminator\n # ---------------------\n dis_optimizer.zero_grad()\n\n real_validity = dis_net(real_imgs)\n fake_imgs = gen_net(z).detach()\n assert fake_imgs.size() == real_imgs.size()\n\n fake_validity = dis_net(fake_imgs)\n\n # cal loss\n d_loss = torch.mean(nn.ReLU(inplace=True)(1.0 - real_validity)) + \\\n torch.mean(nn.ReLU(inplace=True)(1 + fake_validity))\n # d_loss = criterion(real_validity, y_real) + criterion(fake_validity, y_fake)\n d_loss.backward()\n dis_optimizer.step()\n\n writer.add_scalar('d_loss', d_loss.item(), global_steps)\n\n # -----------------\n # Train Generator\n # -----------------\n if global_steps % args.n_critic == 0:\n gen_optimizer.zero_grad()\n\n gen_z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.gen_batch_size, args.latent_dim)))\n gen_imgs = gen_net(gen_z)\n fake_validity = dis_net(gen_imgs)\n y_fake = torch.zeros(args.gen_batch_size, 1).cuda()\n\n # cal loss\n g_loss = -torch.mean(fake_validity)\n # g_loss = criterion(fake_validity, y_fake)\n g_loss.backward()\n gen_optimizer.step()\n\n # adjust learning rate\n if schedulers:\n gen_scheduler, dis_scheduler = schedulers\n g_lr = gen_scheduler.step(global_steps)\n d_lr = dis_scheduler.step(global_steps)\n writer.add_scalar('LR/g_lr', g_lr, global_steps)\n writer.add_scalar('LR/d_lr', d_lr, global_steps)\n\n # moving average weight\n for p, avg_p in zip(gen_net.parameters(), gen_avg_param):\n avg_p.mul_(0.999).add_(0.001, p.data)\n\n writer.add_scalar('g_loss', g_loss.item(), global_steps)\n gen_step += 1\n\n # verbose\n if gen_step and iter_idx % 
args.print_freq == 0:\n tqdm.write(\n \"[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]\" %\n (epoch, args.max_epoch, iter_idx % len(train_loader), len(train_loader), d_loss.item(), g_loss.item()))\n if experiment != None:\n experiment.log_metric(\"gen_loss\", g_loss.item())\n experiment.log_metric(\"dis_loss\", d_loss.item())\n\n writer_dict['train_global_steps'] = global_steps + 1\n\n\ndef validate(args, fixed_z, fid_stat, gen_net: nn.Module, writer_dict):\n writer = writer_dict['writer']\n global_steps = writer_dict['valid_global_steps']\n\n # eval mode\n gen_net = gen_net.eval()\n\n # generate images\n sample_imgs = gen_net(fixed_z)\n img_grid = make_grid(sample_imgs, nrow=8, normalize=True, scale_each=True)\n\n # get fid and inception score\n fid_buffer_dir = os.path.join(args.path_helper['sample_path'], 'fid_buffer')\n try:\n os.makedirs(fid_buffer_dir)\n except:\n pass\n\n eval_iter = args.num_eval_imgs // args.eval_batch_size\n img_list = list()\n with torch.no_grad():\n for iter_idx in tqdm(range(eval_iter), desc='sample images'):\n z = torch.cuda.FloatTensor(np.random.normal(0, 1, (args.eval_batch_size, args.latent_dim)))\n\n # Generate a batch of images\n gen_imgs = gen_net(z).mul_(127.5).add_(127.5).clamp_(0.0, 255.0).permute(0, 2, 3, 1).to('cpu', torch.uint8).numpy()\n for img_idx, img in enumerate(gen_imgs):\n file_name = os.path.join(fid_buffer_dir, f'iter{iter_idx}_b{img_idx}.png')\n imsave(file_name, img)\n img_list.extend(list(gen_imgs))\n\n # get inception score\n logger.info('=> calculate inception score')\n\n torch.cuda.empty_cache()\n mean, std = get_inception_score(img_list)\n\n # get fid score\n logger.info('=> calculate fid score')\n fid_score = 0 #calculate_fid_given_paths([fid_buffer_dir, fid_stat], inception_path=None)\n\n # os.system('rm -r {}'.format(fid_buffer_dir))\n\n writer.add_image('sampled_images', img_grid, global_steps)\n writer.add_scalar('Inception_score/mean', mean, global_steps)\n 
writer.add_scalar('Inception_score/std', std, global_steps)\n # writer.add_scalar('FID_score', fid_score, global_steps)\n\n writer_dict['valid_global_steps'] = global_steps + 1\n\n return mean, fid_score, sample_imgs\n\n\n\n\n\nclass LinearLrDecay(object):\n def __init__(self, optimizer, start_lr, end_lr, decay_start_step, decay_end_step):\n\n assert start_lr > end_lr\n self.optimizer = optimizer\n self.delta = (start_lr - end_lr) / (decay_end_step - decay_start_step)\n self.decay_start_step = decay_start_step\n self.decay_end_step = decay_end_step\n self.start_lr = start_lr\n self.end_lr = end_lr\n\n def step(self, current_step):\n if current_step <= self.decay_start_step:\n lr = self.start_lr\n elif current_step >= self.decay_end_step:\n lr = self.end_lr\n else:\n lr = self.start_lr - self.delta * (current_step - self.decay_start_step)\n for param_group in self.optimizer.param_groups:\n param_group['lr'] = lr\n return lr\n\n\ndef load_params(model, new_param):\n for p, new_p in zip(model.parameters(), new_param):\n p.data.copy_(new_p)\n\n\ndef copy_params(model):\n flatten = deepcopy(list(p.data for p in model.parameters()))\n return flatten\n"
] |
[
[
"torch.mean",
"torch.max",
"torch.zeros",
"torch.sum",
"torch.neg",
"torch.no_grad",
"numpy.random.randint",
"torch.ones",
"torch.add",
"torch.argmin",
"numpy.zeros",
"torch.nn.functional.softplus",
"torch.nn.init.constant_",
"torch.min",
"torch.cuda.empty_cache",
"torch.nn.BCELoss",
"torch.log",
"torch.nn.init.normal_",
"numpy.argsort",
"numpy.sort",
"numpy.ones",
"numpy.random.normal",
"torch.nn.init.orthogonal_",
"torch.nn.ReLU",
"torch.nn.init.xavier_uniform"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
msalvaris/IIC
|
[
"2915d2de160608aaf16d776eaebf93c45d07223a",
"2915d2de160608aaf16d776eaebf93c45d07223a"
] |
[
"iic/utils/cluster/data.py",
"iic/scripts/segmentation/analysis/clone_and_eval.py"
] |
[
"import sys\nfrom datetime import datetime\n\nimport torch\nimport torchvision\nfrom torch.utils.data import ConcatDataset\n\nfrom iic.datasets.clustering.truncated_dataset import TruncatedDataset\nfrom iic.utils.cluster.transforms import sobel_make_transforms, \\\n greyscale_make_transforms\nfrom iic.utils.semisup.dataset import TenCropAndFinish\nfrom .general import reorder_train_deterministic\n\n\n# Used by sobel and greyscale clustering twohead scripts -----------------------\n\ndef cluster_twohead_create_dataloaders(config):\n assert (config.mode == \"IID\")\n assert (config.twohead)\n\n target_transform = None\n\n if \"CIFAR\" in config.dataset:\n config.train_partitions_head_A = [True, False]\n config.train_partitions_head_B = config.train_partitions_head_A\n\n config.mapping_assignment_partitions = [True, False]\n config.mapping_test_partitions = [True, False]\n\n if config.dataset == \"CIFAR10\":\n dataset_class = torchvision.datasets.CIFAR10\n elif config.dataset == \"CIFAR100\":\n dataset_class = torchvision.datasets.CIFAR100\n elif config.dataset == \"CIFAR20\":\n dataset_class = torchvision.datasets.CIFAR100\n target_transform = _cifar100_to_cifar20\n else:\n assert (False)\n\n # datasets produce either 2 or 5 channel images based on config.include_rgb\n tf1, tf2, tf3 = sobel_make_transforms(config)\n\n elif config.dataset == \"STL10\":\n assert (config.mix_train)\n if not config.stl_leave_out_unlabelled:\n print(\"adding unlabelled data for STL10\")\n config.train_partitions_head_A = [\"train+unlabeled\", \"test\"]\n else:\n print(\"not using unlabelled data for STL10\")\n config.train_partitions_head_A = [\"train\", \"test\"]\n\n config.train_partitions_head_B = [\"train\", \"test\"]\n\n config.mapping_assignment_partitions = [\"train\", \"test\"]\n config.mapping_test_partitions = [\"train\", \"test\"]\n\n dataset_class = torchvision.datasets.STL10\n\n # datasets produce either 2 or 5 channel images based on config.include_rgb\n tf1, tf2, tf3 = 
sobel_make_transforms(config)\n\n elif config.dataset == \"MNIST\":\n config.train_partitions_head_A = [True, False]\n config.train_partitions_head_B = config.train_partitions_head_A\n\n config.mapping_assignment_partitions = [True, False]\n config.mapping_test_partitions = [True, False]\n\n dataset_class = torchvision.datasets.MNIST\n\n tf1, tf2, tf3 = greyscale_make_transforms(config)\n\n else:\n assert (False)\n\n print(\"Making datasets with %s and %s\" % (dataset_class, target_transform))\n sys.stdout.flush()\n\n dataloaders_head_A = \\\n _create_dataloaders(config, dataset_class, tf1, tf2,\n partitions=config.train_partitions_head_A,\n target_transform=target_transform)\n\n dataloaders_head_B = \\\n _create_dataloaders(config, dataset_class, tf1, tf2,\n partitions=config.train_partitions_head_B,\n target_transform=target_transform)\n\n mapping_assignment_dataloader = \\\n _create_mapping_loader(config, dataset_class, tf3,\n partitions=config.mapping_assignment_partitions,\n target_transform=target_transform)\n\n mapping_test_dataloader = \\\n _create_mapping_loader(config, dataset_class, tf3,\n partitions=config.mapping_test_partitions,\n target_transform=target_transform)\n\n return dataloaders_head_A, dataloaders_head_B, \\\n mapping_assignment_dataloader, mapping_test_dataloader\n\n\n# Used by sobel and greyscale clustering single head scripts -------------------\n\ndef cluster_create_dataloaders(config):\n assert (config.mode == \"IID+\")\n assert (not config.twohead)\n\n target_transform = None\n\n # separate train/test sets\n if \"CIFAR\" in config.dataset:\n config.train_partitions = [True]\n config.mapping_assignment_partitions = [True]\n config.mapping_test_partitions = [False]\n\n if config.dataset == \"CIFAR10\":\n dataset_class = torchvision.datasets.CIFAR10\n elif config.dataset == \"CIFAR100\":\n dataset_class = torchvision.datasets.CIFAR100\n elif config.dataset == \"CIFAR20\":\n dataset_class = torchvision.datasets.CIFAR100\n target_transform 
= _cifar100_to_cifar20\n else:\n assert (False)\n\n # datasets produce either 2 or 5 channel images based on config.include_rgb\n tf1, tf2, tf3 = sobel_make_transforms(config)\n\n elif config.dataset == \"STL10\":\n config.train_partitions = [\"train+unlabeled\"]\n config.mapping_assignment_partitions = [\"train\"]\n config.mapping_test_partitions = [\"test\"]\n\n dataset_class = torchvision.datasets.STL10\n\n # datasets produce either 2 or 5 channel images based on config.include_rgb\n tf1, tf2, tf3 = sobel_make_transforms(config)\n\n elif config.dataset == \"MNIST\":\n config.train_partitions = [True]\n config.mapping_assignment_partitions = [True]\n config.mapping_test_partitions = [False]\n\n dataset_class = torchvision.datasets.MNIST\n\n tf1, tf2, tf3 = greyscale_make_transforms(config)\n\n else:\n assert (False)\n\n print(\"Making datasets with %s and %s\" % (dataset_class, target_transform))\n sys.stdout.flush()\n\n dataloaders = \\\n _create_dataloaders(config, dataset_class, tf1, tf2,\n partitions=config.train_partitions,\n target_transform=target_transform)\n\n mapping_assignment_dataloader = \\\n _create_mapping_loader(config, dataset_class, tf3,\n partitions=config.mapping_assignment_partitions,\n target_transform=target_transform)\n\n mapping_test_dataloader = \\\n _create_mapping_loader(config, dataset_class, tf3,\n partitions=config.mapping_test_partitions,\n target_transform=target_transform)\n\n return dataloaders, mapping_assignment_dataloader, mapping_test_dataloader\n\n\n# Other generic data creation functions ----------------------------------------\n\ndef make_STL_data(config, tf1=None, tf2=None, tf3=None,\n truncate_assign=False, truncate_pc=None):\n assert (tf3 is not None)\n if (tf1 is not None) and (tf2 is not None):\n dataloaders = _create_dataloaders(config, torchvision.datasets.STL10, tf1,\n tf2,\n partitions=config.train_partitions_head_B)\n\n mapping_assignment_dataloader = _create_mapping_loader(\n config, torchvision.datasets.STL10, 
tf3,\n partitions=config.mapping_assignment_partitions,\n truncate=truncate_assign, truncate_pc=truncate_pc)\n\n mapping_test_dataloader = _create_mapping_loader(\n config, torchvision.datasets.STL10, tf3,\n partitions=config.mapping_test_partitions)\n\n if (tf1 is not None) and (tf2 is not None):\n return dataloaders, mapping_assignment_dataloader, mapping_test_dataloader\n else:\n return mapping_assignment_dataloader, mapping_test_dataloader\n\n\ndef make_CIFAR_data(config, tf1=None, tf2=None, tf3=None,\n truncate_assign=False, truncate_pc=None):\n target_transform = None\n\n if config.dataset == \"CIFAR10\":\n dataset_class = torchvision.datasets.CIFAR10\n elif config.dataset == \"CIFAR100\":\n dataset_class = torchvision.datasets.CIFAR100\n elif config.dataset == \"CIFAR20\":\n dataset_class = torchvision.datasets.CIFAR100\n target_transform = _cifar100_to_cifar20\n else:\n assert (False)\n\n assert (tf3 is not None)\n if (tf1 is not None) and (tf2 is not None):\n dataloaders = _create_dataloaders(config, dataset_class, tf1, tf2,\n partitions=config.train_partitions_head_B,\n target_transform=target_transform)\n\n mapping_assignment_dataloader = _create_mapping_loader(\n config, dataset_class, tf3, config.mapping_assignment_partitions,\n target_transform=target_transform,\n truncate=truncate_assign, truncate_pc=truncate_pc)\n\n mapping_test_dataloader = _create_mapping_loader(\n config, dataset_class, tf3, config.mapping_test_partitions,\n target_transform=target_transform)\n\n if (tf1 is not None) and (tf2 is not None):\n return dataloaders, mapping_assignment_dataloader, mapping_test_dataloader\n else:\n return mapping_assignment_dataloader, mapping_test_dataloader\n\n\ndef make_MNIST_data(config, tf1=None, tf2=None, tf3=None,\n truncate_assign=False, truncate_pc=None):\n assert (tf3 is not None)\n if (tf1 is not None) and (tf2 is not None):\n dataloaders = _create_dataloaders(config, torchvision.datasets.MNIST, tf1,\n tf2,\n 
partitions=config.train_partitions_head_B)\n\n mapping_assignment_dataloader = _create_mapping_loader(\n config, torchvision.datasets.MNIST, tf3,\n config.mapping_assignment_partitions,\n truncate=truncate_assign, truncate_pc=truncate_pc)\n\n mapping_test_dataloader = _create_mapping_loader(\n config, torchvision.datasets.MNIST, tf3,\n config.mapping_test_partitions)\n\n if (tf1 is not None) and (tf2 is not None):\n return dataloaders, mapping_assignment_dataloader, mapping_test_dataloader\n else:\n return mapping_assignment_dataloader, mapping_test_dataloader\n\n\n# Data creation helpers --------------------------------------------------------\n\ndef _create_dataloaders(config, dataset_class, tf1, tf2,\n partitions,\n target_transform=None,\n shuffle=False):\n train_imgs_list = []\n for train_partition in partitions:\n if \"STL10\" == config.dataset:\n train_imgs_curr = dataset_class(\n root=config.dataset_root,\n transform=tf1,\n split=train_partition,\n target_transform=target_transform)\n else:\n train_imgs_curr = dataset_class(\n root=config.dataset_root,\n transform=tf1,\n train=train_partition,\n target_transform=target_transform)\n\n if hasattr(config, \"mix_train\"):\n if config.mix_train and (train_partition == \"train+unlabeled\"):\n train_imgs_curr = reorder_train_deterministic(train_imgs_curr)\n train_imgs_list.append(train_imgs_curr)\n\n train_imgs = ConcatDataset(train_imgs_list)\n train_dataloader = torch.utils.data.DataLoader(train_imgs,\n batch_size=config.dataloader_batch_sz,\n shuffle=shuffle,\n num_workers=0,\n drop_last=False)\n\n if not shuffle:\n assert (isinstance(train_dataloader.sampler,\n torch.utils.data.sampler.SequentialSampler))\n dataloaders = [train_dataloader]\n\n for d_i in range(config.num_dataloaders):\n print(\"Creating auxiliary dataloader ind %d out of %d time %s\" %\n (d_i, config.num_dataloaders, datetime.now()))\n sys.stdout.flush()\n\n train_tf_imgs_list = []\n for train_partition in partitions:\n if \"STL10\" == 
config.dataset:\n train_imgs_tf_curr = dataset_class(\n root=config.dataset_root,\n transform=tf2, # random per call\n split=train_partition,\n target_transform=target_transform)\n else:\n train_imgs_tf_curr = dataset_class(\n root=config.dataset_root,\n transform=tf2,\n train=train_partition,\n target_transform=target_transform)\n\n if hasattr(config, \"mix_train\"):\n if config.mix_train and (train_partition == \"train+unlabeled\"):\n train_imgs_tf_curr = reorder_train_deterministic(train_imgs_tf_curr)\n train_tf_imgs_list.append(train_imgs_tf_curr)\n\n train_imgs_tf = ConcatDataset(train_tf_imgs_list)\n train_tf_dataloader = \\\n torch.utils.data.DataLoader(train_imgs_tf,\n batch_size=config.dataloader_batch_sz,\n shuffle=shuffle,\n num_workers=0,\n drop_last=False)\n\n if not shuffle:\n assert (isinstance(train_tf_dataloader.sampler,\n torch.utils.data.sampler.SequentialSampler))\n assert (len(train_dataloader) == len(train_tf_dataloader))\n dataloaders.append(train_tf_dataloader)\n\n num_train_batches = len(dataloaders[0])\n print(\"Length of datasets vector %d\" % len(dataloaders))\n print(\"Number of batches per epoch: %d\" % num_train_batches)\n sys.stdout.flush()\n\n return dataloaders\n\n\ndef _create_mapping_loader(config, dataset_class, tf3, partitions,\n target_transform=None,\n truncate=False, truncate_pc=None,\n tencrop=False,\n shuffle=False):\n if truncate:\n print(\"Note: creating mapping loader with truncate == True\")\n\n if tencrop:\n assert (tf3 is None)\n\n imgs_list = []\n for partition in partitions:\n if \"STL10\" == config.dataset:\n imgs_curr = dataset_class(\n root=config.dataset_root,\n transform=tf3,\n split=partition,\n target_transform=target_transform)\n else:\n imgs_curr = dataset_class(\n root=config.dataset_root,\n transform=tf3,\n train=partition,\n target_transform=target_transform)\n\n if truncate:\n print(\"shrinking dataset from %d\" % len(imgs_curr))\n imgs_curr = TruncatedDataset(imgs_curr, pc=truncate_pc)\n print(\"... 
to %d\" % len(imgs_curr))\n\n if tencrop:\n imgs_curr = TenCropAndFinish(imgs_curr, input_sz=config.input_sz,\n include_rgb=config.include_rgb)\n\n imgs_list.append(imgs_curr)\n\n imgs = ConcatDataset(imgs_list)\n dataloader = torch.utils.data.DataLoader(imgs,\n batch_size=config.batch_sz,\n # full batch\n shuffle=shuffle,\n num_workers=0,\n drop_last=False)\n\n if not shuffle:\n assert (isinstance(dataloader.sampler,\n torch.utils.data.sampler.SequentialSampler))\n return dataloader\n\n\ndef _cifar100_to_cifar20(target):\n # obtained from cifar_test script\n _dict = \\\n {0: 4,\n 1: 1,\n 2: 14,\n 3: 8,\n 4: 0,\n 5: 6,\n 6: 7,\n 7: 7,\n 8: 18,\n 9: 3,\n 10: 3,\n 11: 14,\n 12: 9,\n 13: 18,\n 14: 7,\n 15: 11,\n 16: 3,\n 17: 9,\n 18: 7,\n 19: 11,\n 20: 6,\n 21: 11,\n 22: 5,\n 23: 10,\n 24: 7,\n 25: 6,\n 26: 13,\n 27: 15,\n 28: 3,\n 29: 15,\n 30: 0,\n 31: 11,\n 32: 1,\n 33: 10,\n 34: 12,\n 35: 14,\n 36: 16,\n 37: 9,\n 38: 11,\n 39: 5,\n 40: 5,\n 41: 19,\n 42: 8,\n 43: 8,\n 44: 15,\n 45: 13,\n 46: 14,\n 47: 17,\n 48: 18,\n 49: 10,\n 50: 16,\n 51: 4,\n 52: 17,\n 53: 4,\n 54: 2,\n 55: 0,\n 56: 17,\n 57: 4,\n 58: 18,\n 59: 17,\n 60: 10,\n 61: 3,\n 62: 2,\n 63: 12,\n 64: 12,\n 65: 16,\n 66: 12,\n 67: 1,\n 68: 9,\n 69: 19,\n 70: 2,\n 71: 10,\n 72: 0,\n 73: 1,\n 74: 16,\n 75: 12,\n 76: 9,\n 77: 13,\n 78: 15,\n 79: 13,\n 80: 16,\n 81: 19,\n 82: 2,\n 83: 4,\n 84: 6,\n 85: 19,\n 86: 5,\n 87: 5,\n 88: 8,\n 89: 19,\n 90: 18,\n 91: 1,\n 92: 2,\n 93: 15,\n 94: 6,\n 95: 0,\n 96: 17,\n 97: 8,\n 98: 14,\n 99: 13}\n\n return _dict[target]\n",
"from __future__ import print_function\n\nimport argparse\nimport pickle\nimport sys\nfrom datetime import datetime\n\nimport matplotlib\nimport torch\n\nmatplotlib.use('Agg')\nimport os\n\nimport iic.archs as archs\nfrom iic.utils.cluster.general import nice\nfrom iic.utils.segmentation.segmentation_eval import \\\n segmentation_eval\nfrom iic.utils.segmentation.data import segmentation_create_dataloaders\n\n# Clone any old model (from config and best_net) and re-evaluate, including\n# finding 1-1 mapping from output channels to ground truth clusters.\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--model_ind\", type=int, required=True)\nparser.add_argument(\"--old_model_ind\", type=int, required=True)\nparser.add_argument(\"--out_root\", type=str,\n default=\"/scratch/shared/slow/xuji/iid_private\")\n\nconfig = parser.parse_args()\n\nconfig.out_dir = os.path.join(config.out_root, str(config.model_ind))\nold_out_dir = os.path.join(config.out_root, str(config.old_model_ind))\n\nif not os.path.exists(config.out_dir):\n os.makedirs(config.out_dir)\n\nreloaded_config_path = os.path.join(old_out_dir, \"config.pickle\")\nprint(\"Loading restarting config from: %s\" % reloaded_config_path)\nwith open(reloaded_config_path, \"rb\") as config_f:\n old_config = pickle.load(config_f)\nassert (old_config.model_ind == config.old_model_ind)\n\nif not hasattr(old_config, \"batchnorm_track\"):\n old_config.batchnorm_track = True\n\nif not hasattr(old_config, \"num_sub_heads\"):\n old_config.num_sub_heads = old_config.num_heads\n\nif not hasattr(old_config, \"use_doersch_datasets\"):\n old_config.use_doersch_datasets = False\n\nwith open(os.path.join(old_config.out_dir, \"config.pickle\"), 'wb') as outfile:\n pickle.dump(old_config, outfile)\n\nwith open(os.path.join(old_config.out_dir, \"config.txt\"), \"w\") as text_file:\n text_file.write(\"%s\" % old_config)\n\n# Model ------------------------------------------------------\n\ndataloaders_head_A, 
mapping_assignment_dataloader, mapping_test_dataloader = \\\n segmentation_create_dataloaders(old_config)\ndataloaders_head_B = dataloaders_head_A # unlike for clustering datasets\n\nnet = archs.__dict__[old_config.arch](old_config)\n\nnet_state = torch.load(os.path.join(old_config.out_dir, \"best_net.pytorch\"),\n map_location=lambda storage, loc: storage)\nnet.load_state_dict(net_state)\nnet.cuda()\nnet = torch.nn.DataParallel(net)\n\nstats_dict = segmentation_eval(old_config, net,\n mapping_assignment_dataloader=mapping_assignment_dataloader,\n mapping_test_dataloader=mapping_test_dataloader,\n sobel=(not old_config.no_sobel),\n using_IR=old_config.using_IR,\n return_only=True)\n\nacc = stats_dict[\"best\"]\n\nconfig.epoch_stats = [stats_dict]\nconfig.epoch_acc = [acc]\nconfig.epoch_avg_subhead_acc = stats_dict[\"avg\"]\n\nprint(\"Time %s: \\n %s\" % (datetime.now(), nice(config.epoch_stats[-1])))\nsys.stdout.flush()\n\nwith open(os.path.join(config.out_dir, \"config.pickle\"), 'wb') as outfile:\n pickle.dump(config, outfile)\n\nwith open(os.path.join(config.out_dir, \"config.txt\"), \"w\") as text_file:\n text_file.write(\"%s\" % config)\n"
] |
[
[
"torch.utils.data.ConcatDataset",
"torch.utils.data.DataLoader"
],
[
"matplotlib.use",
"torch.nn.DataParallel"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
microsoft/event-vae-rl
|
[
"cb64c2809bcbfec81e84fff93a912f65c72f73d3"
] |
[
"event_vae/test.py"
] |
[
"import argparse\nimport os\nimport random\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.parallel\nimport torch.backends.cudnn as cudnn\nimport torch.optim as optim\nfrom torch.utils import data\nimport torchvision.datasets as dset\nimport torchvision.transforms as transforms\nimport torchvision.utils as vutils\nfrom torch.autograd import Variable\nimport torch.nn.functional as F\nfrom copy import deepcopy\nimport matplotlib.pyplot as plt\nimport time\nimport loss\n\nfrom torch.utils.tensorboard import SummaryWriter\n\nfrom event_ae import EventAE\nfrom chamfer import ChamferDistance, ChamferLoss\nfrom dataloader import EventStreamDataset, EventStreamArray\nfrom data_utils import *\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"--input_file\",\n type=str,\n default=\"data/MSTrain_bytestream.txt\",\n help=\"training data filename\",\n)\nparser.add_argument(\"--batch_size\", type=int,\n default=1000, help=\"input batch size\")\nparser.add_argument(\"--batch_num\", type=int, default=50,\n help=\"number of batches\")\nparser.add_argument(\"--data_len\", type=int, default=2,\n help=\"event element length\")\nparser.add_argument(\"--tcode\", type=bool, default=False,\n help=\"consider timestamps\")\nparser.add_argument(\n \"--nepoch\", type=int, default=5000, help=\"number of epochs to train for\"\n)\nparser.add_argument(\"--latent_size\", type=int, default=8)\nparser.add_argument(\n \"--rec_loss\",\n type=str,\n default=\"huber\",\n help=\"type of loss: mse, huber, bce, chamfer\",\n)\nparser.add_argument(\n \"--decoder\", type=str, default=\"image\", help=\"decoder type: stream or image\"\n)\nparser.add_argument(\"--outf\", type=str, default=\"weights\",\n help=\"output folder\")\nparser.add_argument(\"--model\", type=str, default=\"\", help=\"model path\")\nparser.add_argument(\n \"--norm_type\",\n type=str,\n default=\"none\",\n help=\"normalization type: scale: [0, 1]; center: [-1, 1]\",\n)\nparser.add_argument(\"--arch\", 
type=str, default=\"vanilla\")\n\nopt = parser.parse_args()\nprint(opt)\n\n\ndef blue(x): return \"\\033[94m\" + x + \"\\033[0m\"\n\n\nopt.manualSeed = random.randint(1, 10000) # fix seed\nprint(\"Random Seed: \", opt.manualSeed)\nrandom.seed(opt.manualSeed)\ntorch.manual_seed(opt.manualSeed)\n\ntry:\n os.makedirs(opt.outf)\nexcept OSError:\n pass\n\nwriter = SummaryWriter(\"runs/str_to_img_test\")\n\n# Params:\n# n_events, data_size for stream decoder\n# Height, width for image decoder\n\nH = 32\nW = 32\nparams = [H, W]\n\nevent_dataset = EventStreamArray(\n opt.input_file, opt.batch_num, opt.batch_size, opt.data_len\n)\n\n\"\"\"\nbatch_size_total = opt.batch_size * opt.batch_num\ntrain_loader = data.DataLoader(\n event_dataset,\n batch_size=batch_size_total,\n shuffle=False,\n num_workers=0,\n drop_last=True,\n)\n\"\"\"\n\ndata_utils = EventDataUtils(32, 32, opt.norm_type)\n\nenet = EventAE(\n opt.data_len, opt.latent_size, params, decoder=opt.decoder, norm_type=opt.norm_type\n)\nif opt.model != \"\":\n enet.load_state_dict(torch.load(opt.model))\nenet.cuda()\n\ninit = True\nevent_np_stack = np.empty([opt.batch_num, opt.batch_size, 4], dtype=np.float32)\nframes_stack = np.empty([opt.batch_num, H * W], dtype=np.float32)\n\nif opt.data_len == 3:\n pol = True\nelse:\n pol = False\n\n\nwith torch.no_grad():\n for i, data in enumerate(test_loader, 0):\n # events = data_utils.normalize(EventExtractor(data, batch_num=1))\n\n idx = random.randint(0, 1000000)\n events = data_utils.normalize(event_array.get_event_stack(idx))\n events = Variable(events)\n events = events.transpose(2, 1)\n events = events.cuda()\n\n recon, z = enet(events)\n\n events = events.transpose(2, 1).contiguous()\n\n if opt.decoder == \"stream\":\n recon = recon.transpose(2, 1).contiguous()\n\n data_utils.compare_frames(events, recon)\n"
] |
[
[
"torch.load",
"torch.manual_seed",
"torch.no_grad",
"torch.utils.tensorboard.SummaryWriter",
"numpy.empty",
"torch.autograd.Variable"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
andreaskuster/dace
|
[
"f2c16430543bb56c54a833beeb626b8c30967428",
"f2c16430543bb56c54a833beeb626b8c30967428",
"f2c16430543bb56c54a833beeb626b8c30967428",
"f2c16430543bb56c54a833beeb626b8c30967428",
"f2c16430543bb56c54a833beeb626b8c30967428"
] |
[
"tests/control_flow_test.py",
"tests/transformations/subgraph_fusion/block_allreduce_cudatest.py",
"dace/symbolic.py",
"dace/dtypes.py",
"tests/duplicate_naming_test.py"
] |
[
"# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\nimport dace\nimport numpy as np\nW = dace.symbol('W')\nH = dace.symbol('H')\n\n\[email protected]\ndef control_flow_test(A, B, tol):\n if tol[0] < 4:\n while tol[0] < 4:\n\n @dace.map(_[0:W])\n def something(i):\n a << A[0, i]\n b >> B[0, i]\n t >> tol(1, lambda x, y: x + y)\n b = a\n t = a * a\n elif tol[0] <= 5:\n\n @dace.map(_[0:W])\n def something(i):\n a << A[0, i]\n b >> B[0, i]\n b = a\n elif tol[0] <= 6:\n\n @dace.map(_[0:W])\n def something(i):\n a << A[0, i]\n b >> B[0, i]\n b = a\n else:\n for i in range(W):\n\n @dace.map(_[0:W])\n def something(j):\n a << A[0, j]\n b >> B[0, j]\n b = a\n\n\[email protected]\ndef fictest(A: dace.int32[4]):\n for a in range(min(A[0], A[1])):\n with dace.tasklet:\n inp << A[2]\n out >> A[3]\n out = inp + a\n\n\[email protected]\ndef arr2dtest(A: dace.float64[4, 2]):\n if A[1, 1] < 0.5:\n with dace.tasklet:\n out >> A[0, 0]\n out = 100.0\n else:\n with dace.tasklet:\n out >> A[0, 0]\n out = -100.0\n\n\ndef test_control_flow_basic():\n control_flow_test.compile(dace.float32[W, H], dace.float32[H, W],\n dace.float32[1])\n\n\ndef test_function_in_condition():\n A = np.random.randint(0, 10, 4, dtype=np.int32)\n expected = A.copy()\n for a in range(min(A[0], A[1])):\n expected[3] = expected[2] + a\n\n fictest(A)\n assert np.allclose(A, expected)\n\n\ndef test_2d_access():\n print(\"Running without strict transformations ...\")\n A = np.random.rand(4, 2)\n expected = A.copy()\n expected[0, 0] = 100.0 if expected[1, 1] < 0.5 else -100.0\n\n # arr2dtest(A)\n sdfg = arr2dtest.to_sdfg(strict=False)\n sdfg(A=A)\n assert np.allclose(A, expected)\n\n\ndef test_2d_access_sdfgapi():\n sdfg = dace.SDFG('access2d_sdfg')\n sdfg.add_array('A', [4, 2], dace.float64)\n begin_state = sdfg.add_state()\n state_true = sdfg.add_state()\n state_false = sdfg.add_state()\n state_true.add_edge(\n state_true.add_tasklet('assign', {}, {'a'}, 'a = 100.0'), 'a',\n 
state_true.add_write('A'), None, dace.Memlet('A[0, 0]'))\n state_false.add_edge(\n state_false.add_tasklet('assign', {}, {'a'}, 'a = -100.0'), 'a',\n state_false.add_write('A'), None, dace.Memlet('A[0, 0]'))\n\n sdfg.add_edge(begin_state, state_true, dace.InterstateEdge('A[1,1] < 0.5'))\n sdfg.add_edge(begin_state, state_false,\n dace.InterstateEdge('A[1,1] >= 0.5'))\n\n # Prepare inputs\n A = np.random.rand(4, 2)\n expected = A.copy()\n expected[0, 0] = 100.0 if expected[1, 1] < 0.5 else -100.0\n\n # Without control-flow detection\n A1 = A.copy()\n csdfg = sdfg.compile()\n csdfg(A=A1)\n assert np.allclose(A1, expected)\n del csdfg\n\n # With control-flow detection\n end_state = sdfg.add_state()\n sdfg.add_edge(state_true, end_state, dace.InterstateEdge())\n sdfg.add_edge(state_false, end_state, dace.InterstateEdge())\n assert 'else' in sdfg.generate_code()[0].code\n\n csdfg = sdfg.compile()\n csdfg(A=A)\n assert np.allclose(A, expected)\n\n\ndef test_2d_assignment():\n sdfg = dace.SDFG('assign2d')\n sdfg.add_array('A', [4, 2], dace.float64)\n state = sdfg.add_state()\n state2 = sdfg.add_state()\n state2.add_edge(state2.add_tasklet('assign', {}, {'a'}, 'a = i'), 'a',\n state2.add_write('A'), None, dace.Memlet('A[0, 0]'))\n sdfg.add_edge(state, state2,\n dace.InterstateEdge(assignments=dict(i='A[1, 1]')))\n\n A = np.random.rand(4, 2)\n sdfg(A=A)\n assert np.allclose(A[0, 0], A[1, 1])\n\n\ndef test_while_symbol():\n @dace.program\n def whiletest(A: dace.int32[1]):\n i = 6\n while i > 0:\n A[0] -= 1\n i -= i\n\n A = dace.ndarray([1], dace.int32)\n A[0] = 5\n\n whiletest(A)\n\n assert A[0] == 4\n\n if dace.Config.get_bool('optimizer', 'detect_control_flow'):\n code = whiletest.to_sdfg().generate_code()[0].clean_code\n assert 'while ' in code\n assert 'goto ' not in code\n\n\ndef test_while_data():\n @dace.program\n def whiletest(A: dace.int32[1]):\n while A[0] > 0:\n with dace.tasklet:\n a << A[0]\n b >> A[0]\n b = a - 1\n\n A = dace.ndarray([1], dace.int32)\n A[0] = 
5\n\n whiletest(A)\n\n assert A[0] == 0\n\n # Disable check due to CFG generation in Python frontend\n # if dace.Config.get_bool('optimizer', 'detect_control_flow'):\n # code = whiletest.to_sdfg().generate_code()[0].clean_code\n # assert 'while ' in code\n\n\ndef test_dowhile():\n sdfg = dace.SDFG('dowhiletest')\n sdfg.add_array('A', [1], dace.int32)\n init = sdfg.add_state()\n state1 = sdfg.add_state()\n sdfg.add_edge(init, state1, dace.InterstateEdge(assignments={'cond': '1'}))\n state2 = sdfg.add_state()\n sdfg.add_edge(state1, state2,\n dace.InterstateEdge(assignments={'cond': 'cond + 1'}))\n guard = sdfg.add_state_after(state2)\n after = sdfg.add_state()\n sdfg.add_edge(guard, state1, dace.InterstateEdge('cond < 5'))\n sdfg.add_edge(guard, after, dace.InterstateEdge('cond >= 5'))\n\n t = state1.add_tasklet('something', {'a'}, {'o'}, 'o = a + 1')\n r = state1.add_read('A')\n w = state1.add_write('A')\n state1.add_edge(r, None, t, 'a', dace.Memlet('A'))\n state1.add_edge(t, 'o', w, None, dace.Memlet('A'))\n\n A = np.zeros([1], dtype=np.int32)\n sdfg(A=A)\n assert A[0] == 4\n\n # TODO: Not yet available\n # if dace.Config.get_bool('optimizer', 'detect_control_flow'):\n # code = sdfg.generate_code()[0].clean_code\n # assert 'do {' in code and '} while' in code\n\n\ndef test_ifchain():\n @dace.program\n def casetest(A: dace.int32[2]):\n if A[0] == 0:\n A[1] = 5\n elif A[0] == 1:\n A[1] = 3\n elif A[0] == 3:\n A[1] = 1\n elif A[0] == 5:\n A[1] = 0\n\n sdfg: dace.SDFG = casetest.to_sdfg()\n A = np.array([3, 0], dtype=np.int32)\n sdfg(A=A)\n assert A[1] == 1\n\n if dace.Config.get_bool('optimizer', 'detect_control_flow'):\n code = sdfg.generate_code()[0].clean_code\n assert 'else ' in code\n\n\ndef test_ifchain_manual():\n sdfg = dace.SDFG('casetest')\n sdfg.add_array('A', [2], dace.int32)\n init = sdfg.add_state()\n case0 = sdfg.add_state()\n case1 = sdfg.add_state()\n case3 = sdfg.add_state()\n case5 = sdfg.add_state()\n end = sdfg.add_state()\n for case, state in 
[(0, case0), (1, case1), (3, case3), (5, case5)]:\n if case == 5:\n sdfg.add_edge(init, state, dace.InterstateEdge(f'A[0] >= {case}'))\n else:\n sdfg.add_edge(init, state, dace.InterstateEdge(f'A[0] == {case}'))\n t = state.add_tasklet('update', {}, {'a'}, f'a = {case}')\n w = state.add_write('A')\n state.add_edge(t, 'a', w, None, dace.Memlet('A[1]'))\n sdfg.add_edge(state, end, dace.InterstateEdge())\n\n A = np.array([6, 0], dtype=np.int32)\n sdfg(A=A)\n assert A[1] == 5\n\n if dace.Config.get_bool('optimizer', 'detect_control_flow'):\n code = sdfg.generate_code()[0].clean_code\n assert 'else if' in code\n\n\ndef test_switchcase():\n sdfg = dace.SDFG('casetest')\n sdfg.add_array('A', [2], dace.int32)\n init = sdfg.add_state()\n case0 = sdfg.add_state()\n case1 = sdfg.add_state()\n case3 = sdfg.add_state()\n case5 = sdfg.add_state()\n end = sdfg.add_state()\n for case, state in [(0, case0), (1, case1), (3, case3), (5, case5)]:\n if case == 3:\n sdfg.add_edge(init, state, dace.InterstateEdge(f'{case} == A[0]'))\n else:\n sdfg.add_edge(init, state, dace.InterstateEdge(f'A[0] == {case}'))\n t = state.add_tasklet('update', {}, {'a'}, f'a = {case}')\n w = state.add_write('A')\n state.add_edge(t, 'a', w, None, dace.Memlet('A[1]'))\n sdfg.add_edge(state, end, dace.InterstateEdge())\n\n A = np.array([3, 0], dtype=np.int32)\n sdfg(A=A)\n assert A[1] == 3\n\n if dace.Config.get_bool('optimizer', 'detect_control_flow'):\n code = sdfg.generate_code()[0].clean_code\n assert 'switch ' in code\n\n\ndef test_fsm():\n # Could be interpreted as a while loop of a switch-case\n sdfg = dace.SDFG('fsmtest')\n sdfg.add_scalar('nextstate', dace.int32)\n sdfg.add_array('A', [1], dace.int32)\n start = sdfg.add_state()\n init = sdfg.add_state_after(start)\n case0 = sdfg.add_state()\n case1 = sdfg.add_state()\n case3 = sdfg.add_state()\n case5 = sdfg.add_state()\n estate = sdfg.add_state()\n\n # State transitions\n fsm = {0: 3, 3: 1, 1: 5, 5: 7}\n\n for case, state in [(0, case0), (1, case1), 
(3, case3), (5, case5)]:\n sdfg.add_edge(init, state, dace.InterstateEdge(f'nextstate == {case}'))\n\n r = state.add_read('A')\n t = state.add_tasklet('update', {'ain'}, {'a', 'nstate'},\n f'a = ain + {case}; nstate = {fsm[case]}')\n w = state.add_write('A')\n ws = state.add_write('nextstate')\n state.add_edge(r, None, t, 'ain', dace.Memlet('A'))\n state.add_edge(t, 'a', w, None, dace.Memlet('A'))\n state.add_edge(t, 'nstate', ws, None, dace.Memlet('nextstate'))\n\n sdfg.add_edge(state, estate, dace.InterstateEdge())\n sdfg.add_edge(estate, init, dace.InterstateEdge())\n\n A = np.array([1], dtype=np.int32)\n sdfg(A=A, nextstate=0)\n assert A[0] == 1 + 3 + 1 + 5\n\n if dace.Config.get_bool('optimizer', 'detect_control_flow'):\n code = sdfg.generate_code()[0].clean_code\n assert 'switch ' in code\n\n\nif __name__ == '__main__':\n test_control_flow_basic()\n test_function_in_condition()\n test_2d_access()\n test_2d_access_sdfgapi()\n test_2d_assignment()\n test_while_symbol()\n test_while_data()\n test_dowhile()\n test_ifchain()\n test_ifchain_manual()\n test_switchcase()\n test_fsm()\n",
"# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\nimport dace\nimport numpy as np\nimport pytest\nfrom dace.transformation.subgraph import ReduceExpansion\n\nfrom dace.libraries.standard.nodes.reduce import Reduce\n\nN = dace.symbol('N')\nM = dace.symbol('M')\nN.set(30)\nM.set(30)\n\n\[email protected]\ndef program(A: dace.float32[M, N]):\n return dace.reduce(lambda a, b: max(a, b), A, axis=1, identity=0)\n\[email protected]\ndef test_blockallreduce():\n A = np.random.rand(M.get(), N.get()).astype(np.float32)\n sdfg = program.to_sdfg()\n sdfg.apply_gpu_transformations()\n\n graph = sdfg.nodes()[0]\n for node in graph.nodes():\n if isinstance(node, Reduce):\n reduce_node = node\n reduce_node.implementation = 'CUDA (device)'\n\n csdfg = sdfg.compile()\n result1 = csdfg(A=A, M=M, N=N)\n del csdfg\n\n sdfg_id = 0\n state_id = 0\n subgraph = {ReduceExpansion._reduce: graph.nodes().index(reduce_node)}\n # expand first\n transform = ReduceExpansion(sdfg_id, state_id, subgraph, 0)\n transform.reduce_implementation = 'CUDA (block allreduce)'\n transform.apply(sdfg)\n csdfg = sdfg.compile()\n result2 = csdfg(A=A, M=M, N=N)\n del csdfg\n\n print(np.linalg.norm(result1))\n print(np.linalg.norm(result2))\n assert np.allclose(result1, result2)\n\n print(\"PASS\")\n\n\nif __name__ == '__main__':\n test_blockallreduce()\n",
"# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\nimport ast\nfrom functools import lru_cache\nimport sympy\nimport pickle\nimport re\nfrom typing import Dict, Optional, Set, Union\nimport warnings\nimport numpy\n\nimport sympy.abc\nimport sympy.printing.str\n\nfrom dace import dtypes\n\nDEFAULT_SYMBOL_TYPE = dtypes.int32\n\n\nclass symbol(sympy.Symbol):\n \"\"\" Defines a symbolic expression. Extends SymPy symbols with DaCe-related\n information. \"\"\"\n\n s_currentsymbol = 0\n\n def __new__(cls, name=None, dtype=DEFAULT_SYMBOL_TYPE, **assumptions):\n if name is None:\n # Set name dynamically\n name = \"sym_\" + str(symbol.s_currentsymbol)\n symbol.s_currentsymbol += 1\n elif name.startswith('__DACE'):\n raise NameError('Symbols cannot start with __DACE')\n elif not dtypes.validate_name(name):\n raise NameError('Invalid symbol name \"%s\"' % name)\n\n if not isinstance(dtype, dtypes.typeclass):\n raise TypeError('dtype must be a DaCe type, got %s' % str(dtype))\n\n dkeys = [k for k, v in dtypes.DTYPE_TO_TYPECLASS.items() if v == dtype]\n is_integer = [\n issubclass(k, int) or issubclass(k, numpy.integer) for k in dkeys\n ]\n if 'integer' in assumptions or not numpy.any(is_integer):\n # Using __xnew__ as the regular __new__ is cached, which leads\n # to modifying different references of symbols with the same name.\n self = sympy.Symbol.__xnew__(cls, name, **assumptions)\n else:\n self = sympy.Symbol.__xnew__(cls, name, integer=True, **assumptions)\n\n self.dtype = dtype\n self._constraints = []\n self.value = None\n return self\n\n def set(self, value):\n warnings.warn('symbol.set is deprecated, use keyword arguments',\n DeprecationWarning)\n if value is not None:\n # First, check constraints\n self.check_constraints(value)\n\n self.value = self.dtype(value)\n\n def __getstate__(self):\n return dict(\n super().__getstate__(), **{\n 'value': self.value,\n 'dtype': self.dtype,\n '_constraints': self._constraints\n })\n\n def 
is_initialized(self):\n return self.value is not None\n\n def get(self):\n warnings.warn('symbol.get is deprecated, use keyword arguments',\n DeprecationWarning)\n if self.value is None:\n raise UnboundLocalError('Uninitialized symbol value for \\'' +\n self.name + '\\'')\n return self.value\n\n def set_constraints(self, constraint_list):\n try:\n iter(constraint_list)\n self._constraints = constraint_list\n except TypeError: # constraint_list is not iterable\n self._constraints = [constraint_list]\n\n # Check for the new constraints and reset symbol value if necessary\n if symbol.s_values[self.name] is not None:\n try:\n self.check_constraints(symbol.s_values[self.name])\n except RuntimeError:\n self.reset() # Reset current value\n raise\n\n def add_constraints(self, constraint_list):\n try:\n iter(constraint_list)\n symbol.s_constraints[self.name].extend(constraint_list)\n except TypeError: # constraint_list is not iterable\n symbol.s_constraints[self.name].append(constraint_list)\n\n # Check for the new constraints and reset symbol value if necessary\n if symbol.s_values[self.name] is not None:\n try:\n self.check_constraints(symbol.s_values[self.name])\n except RuntimeError:\n self.reset() # Reset current value\n raise\n\n @property\n def constraints(self):\n return self._constraints\n\n def check_constraints(self, value):\n fail = None\n for constraint in self.constraints:\n try:\n eval_cons = constraint.subs({self: value})\n if not eval_cons:\n fail = constraint\n break\n except (AttributeError, TypeError, ValueError):\n raise RuntimeError(\n 'Cannot validate constraint %s for symbol %s' %\n (str(constraint), self.name))\n if fail is not None:\n raise RuntimeError(\n 'Value %s invalidates constraint %s for symbol %s' %\n (str(value), str(fail), self.name))\n\n def get_or_return(self, uninitialized_ret):\n return self.value or uninitialized_ret\n\n\nclass SymExpr(object):\n \"\"\" Symbolic expressions with support for an overapproximation expression.\n 
\"\"\"\n def __init__(self,\n main_expr: Union[str, 'SymExpr'],\n approx_expr: Optional[Union[str, 'SymExpr']] = None):\n self._main_expr = pystr_to_symbolic(main_expr)\n if approx_expr is None:\n self._approx_expr = self._main_expr\n else:\n self._approx_expr = pystr_to_symbolic(approx_expr)\n\n def __new__(cls, *args, **kwargs):\n if len(args) == 1:\n return args[0]\n if len(args) == 2:\n main_expr, approx_expr = args\n # If values are equivalent, create a normal symbolic expression\n if approx_expr is None or main_expr == approx_expr:\n return main_expr\n return super(SymExpr, cls).__new__(cls)\n\n @property\n def expr(self):\n return self._main_expr\n\n @property\n def approx(self):\n return self._approx_expr\n\n def subs(self, repldict):\n return SymExpr(self._main_expr.subs(repldict),\n self._approx_expr.subs(repldict))\n\n def match(self, *args, **kwargs):\n return self._main_expr.match(*args, **kwargs)\n\n def __hash__(self):\n return hash((self.expr, self.approx))\n\n def __str__(self):\n if self.expr != self.approx:\n return str(self.expr) + \" (~\" + str(self.approx) + \")\"\n else:\n return str(self.expr)\n\n def __add__(self, other):\n if isinstance(other, SymExpr):\n return SymExpr(self.expr + other.expr, self.approx + other.approx)\n if isinstance(other, sympy.Expr):\n return SymExpr(self.expr + other, self.approx + other)\n return self + pystr_to_symbolic(other)\n\n __radd__ = __add__\n\n def __sub__(self, other):\n if isinstance(other, SymExpr):\n return SymExpr(self.expr - other.expr, self.approx - other.approx)\n if isinstance(other, sympy.Expr):\n return SymExpr(self.expr - other, self.approx - other)\n return self - pystr_to_symbolic(other)\n\n def __rsub__(self, other):\n if isinstance(other, SymExpr):\n return SymExpr(other.expr - self.expr, other.approx - self.approx)\n if isinstance(other, sympy.Expr):\n return SymExpr(other - self.expr, other - self.approx)\n return pystr_to_symbolic(other) - self\n\n def __mul__(self, other):\n if 
isinstance(other, SymExpr):\n return SymExpr(self.expr * other.expr, self.approx * other.approx)\n if isinstance(other, sympy.Expr):\n return SymExpr(self.expr * other, self.approx * other)\n return self * pystr_to_symbolic(other)\n\n __rmul__ = __mul__\n\n def __div__(self, other):\n if isinstance(other, SymExpr):\n return SymExpr(self.expr / other.expr, self.approx / other.approx)\n if isinstance(other, sympy.Expr):\n return SymExpr(self.expr / other, self.approx / other)\n return self / pystr_to_symbolic(other)\n\n __truediv__ = __div__\n\n def __floordiv__(self, other):\n if isinstance(other, SymExpr):\n return SymExpr(self.expr // other.expr, self.approx // other.approx)\n if isinstance(other, sympy.Expr):\n return SymExpr(self.expr // other, self.approx // other)\n return self // pystr_to_symbolic(other)\n\n def __mod__(self, other):\n if isinstance(other, SymExpr):\n return SymExpr(self.expr % other.expr, self.approx % other.approx)\n if isinstance(other, sympy.Expr):\n return SymExpr(self.expr % other, self.approx % other)\n return self % pystr_to_symbolic(other)\n\n def __pow__(self, other):\n if isinstance(other, SymExpr):\n return SymExpr(self.expr**other.expr, self.approx**other.approx)\n if isinstance(other, sympy.Expr):\n return SymExpr(self.expr**other, self.approx**other)\n return self**pystr_to_symbolic(other)\n\n def __eq__(self, other):\n if isinstance(other, sympy.Expr):\n return self.expr == other\n if isinstance(other, SymExpr):\n return self.expr == other.expr and self.approx == other.approx\n return self == pystr_to_symbolic(other)\n\n\n# Type hint for symbolic expressions\nSymbolicType = Union[sympy.Basic, SymExpr]\n\n\ndef symvalue(val):\n \"\"\" Returns the symbol value if it is a symbol. 
\"\"\"\n if isinstance(val, symbol):\n return val.get()\n return val\n\n\n# http://stackoverflow.com/q/3844948/\ndef _checkEqualIvo(lst):\n return not lst or lst.count(lst[0]) == len(lst)\n\n\ndef symtype(expr):\n \"\"\" Returns the inferred symbol type from a symbolic expression. \"\"\"\n stypes = [s.dtype for s in symlist(expr).values()]\n if len(stypes) == 0:\n return DEFAULT_SYMBOL_TYPE\n elif _checkEqualIvo(stypes):\n return stypes[0]\n else:\n raise TypeError(\n 'Cannot infer symbolic type from expression \"%s\"'\n ' with symbols [%s]' % (str(expr), ', '.join(\n [str(s) + \": \" + str(s.dtype) for s in symlist(expr)])))\n\n\ndef symlist(values):\n \"\"\" Finds symbol dependencies of expressions. \"\"\"\n result = {}\n try:\n values = iter(values)\n except TypeError:\n values = [values]\n\n for expr in values:\n if isinstance(expr, SymExpr):\n true_expr = expr.expr\n elif isinstance(expr, sympy.Basic):\n true_expr = expr\n else:\n continue\n for atom in true_expr.atoms():\n if isinstance(atom, symbol):\n result[atom.name] = atom\n return result\n\n\ndef evaluate(expr: Union[sympy.Basic, int, float],\n symbols: Dict[Union[symbol, str], Union[int, float]]) -> \\\n Union[int, float, numpy.number]:\n \"\"\"\n Evaluates an expression to a constant based on a mapping from symbols\n to values.\n :param expr: The expression to evaluate.\n :param symbols: A mapping of symbols to their values.\n :return: A constant value based on ``expr`` and ``symbols``.\n \"\"\"\n if isinstance(expr, SymExpr):\n return evaluate(expr.expr, symbols)\n if issymbolic(expr, set(map(str, symbols.keys()))):\n raise TypeError('Expression cannot be evaluated to a constant')\n if isinstance(expr, (int, float, numpy.number)):\n return expr\n\n # Evaluate all symbols\n syms = {(sname if isinstance(sname, sympy.Symbol) else symbol(sname)):\n sval.get() if isinstance(sval, symbol) else sval\n for sname, sval in symbols.items()}\n\n return expr.subs(syms)\n\n\ndef issymbolic(value, 
constants=None):\n \"\"\" Returns True if an expression is symbolic with respect to its contents\n and a given dictionary of constant values. \"\"\"\n constants = constants or {}\n if isinstance(value, SymExpr):\n return issymbolic(value.expr)\n if isinstance(value, symbol) and value.name not in constants:\n return True\n if isinstance(value, sympy.Basic):\n for atom in value.atoms():\n if isinstance(atom, symbol) and atom.name not in constants:\n return True\n return False\n\n\ndef overapproximate(expr):\n \"\"\" Takes a sympy expression and returns its maximal possible value\n in specific cases. \"\"\"\n if isinstance(expr, SymExpr):\n if expr.expr != expr.approx:\n return expr.approx\n else:\n return overapproximate(expr.expr)\n if not isinstance(expr, sympy.Basic):\n return expr\n a = sympy.Wild('a')\n b = sympy.Wild('b')\n c = sympy.Wild('c')\n\n # If Min(x, N-y), return the non-symbolic of the two components\n match = expr.match(sympy.Min(a, b) + c)\n if match is not None and len(match) == 3:\n # First, construct the min expression with \"c\" inline\n newexpr = sympy.Min(match[a] + match[c], match[b] + match[c])\n # Match again\n match = newexpr.match(sympy.Min(a, b))\n if match is not None and len(match) == 2:\n if issymbolic(match[a]) and not issymbolic(match[b]):\n return match[b]\n if issymbolic(match[b]) and not issymbolic(match[a]):\n return match[a]\n\n # If ceiling((k * ((N - 1) / k))) + k), return N\n a = sympy.Wild('a', properties=[lambda k: k.is_Symbol or k.is_Integer])\n b = sympy.Wild('b', properties=[lambda k: k.is_Symbol or k.is_Integer])\n int_floor = sympy.Function('int_floor')\n match = expr.match(sympy.ceiling(b * int_floor(a - 1, b)) + b)\n if match is not None and len(match) == 2:\n return match[a]\n\n return expr\n\n\ndef symbols_in_ast(tree):\n \"\"\" Walks an AST and finds all names, excluding function names. 
\"\"\"\n to_visit = list(tree.__dict__.items())\n symbols = []\n while len(to_visit) > 0:\n (key, val) = to_visit.pop()\n if key == \"func\":\n continue\n if isinstance(val, ast.Name):\n symbols.append(val.id)\n continue\n if isinstance(val, ast.expr):\n to_visit += list(val.__dict__.items())\n if isinstance(val, list):\n to_visit += [(key, v) for v in val]\n return dtypes.deduplicate(symbols)\n\n\ndef symbol_name_or_value(val):\n \"\"\" Returns the symbol name if symbol, otherwise the value as a string. \"\"\"\n if isinstance(val, symbol):\n return val.name\n return str(val)\n\n\ndef sympy_to_dace(exprs, symbol_map=None):\n \"\"\" Convert all `sympy.Symbol`s to DaCe symbols, according to\n `symbol_map`. \"\"\"\n repl = {}\n symbol_map = symbol_map or {}\n\n oneelem = False\n try:\n iter(exprs)\n except TypeError:\n oneelem = True\n exprs = [exprs]\n\n exprs = list(exprs)\n\n for i, expr in enumerate(exprs):\n if isinstance(expr, sympy.Basic):\n for atom in expr.atoms():\n if isinstance(atom, sympy.Symbol):\n try:\n repl[atom] = symbol_map[atom.name]\n except KeyError:\n # Symbol is not in map, create a DaCe symbol with same assumptions\n repl[atom] = symbol(atom.name, **atom.assumptions0)\n exprs[i] = expr.subs(repl)\n if oneelem:\n return exprs[0]\n return exprs\n\n\ndef is_sympy_userfunction(expr):\n \"\"\" Returns True if the expression is a SymPy function. 
\"\"\"\n try:\n return issubclass(type(type(expr)),\n sympy.core.function.UndefinedFunction)\n except AttributeError:\n return issubclass(type(type(expr)), sympy.function.UndefinedFunction)\n\n\ndef swalk(expr, enter_functions=False):\n \"\"\" Walk over a symbolic expression tree (similar to `ast.walk`).\n Returns an iterator that yields the values and recurses into functions,\n if specified.\n \"\"\"\n yield expr\n for arg in expr.args:\n if not enter_functions and is_sympy_userfunction(arg):\n yield arg\n continue\n yield from swalk(arg)\n\n\n_builtin_userfunctions = {\n 'int_floor', 'int_ceil', 'min', 'Min', 'max', 'Max', 'not', 'Not', 'Eq',\n 'NotEq', 'Ne'\n}\n\n\ndef contains_sympy_functions(expr):\n \"\"\" Returns True if expression contains Sympy functions. \"\"\"\n if is_sympy_userfunction(expr):\n if str(expr.func) in _builtin_userfunctions:\n return False\n return True\n for arg in expr.args:\n if contains_sympy_functions(arg):\n return True\n return False\n\n\ndef free_symbols_and_functions(expr: Union[SymbolicType, str]) -> Set[str]:\n if not isinstance(expr, (sympy.Basic, str)):\n return set()\n if isinstance(expr, str):\n if dtypes.validate_name(expr):\n return {expr}\n expr = pystr_to_symbolic(expr)\n\n result = {str(k) for k in expr.free_symbols}\n for atom in swalk(expr):\n if (is_sympy_userfunction(atom)\n and str(atom.func) not in _builtin_userfunctions):\n result.add(str(atom.func))\n return result\n\n\ndef sympy_numeric_fix(expr):\n \"\"\" Fix for printing out integers as floats with \".00000000\".\n Converts the float constants in a given expression to integers. 
\"\"\"\n if not isinstance(expr, sympy.Basic):\n if int(expr) == expr:\n return int(expr)\n return expr\n\n if isinstance(expr, sympy.Number) and expr == int(expr):\n return int(expr)\n return expr\n\n\ndef sympy_intdiv_fix(expr):\n \"\"\" Fix for SymPy printing out reciprocal values when they should be\n integral in \"ceiling/floor\" sympy functions.\n \"\"\"\n nexpr = expr\n if not isinstance(expr, sympy.Basic):\n return expr\n\n # The properties avoid matching the silly case \"ceiling(N/32)\" as\n # ceiling of 1/N and 1/32\n a = sympy.Wild('a', properties=[lambda k: k.is_Symbol or k.is_Integer])\n b = sympy.Wild('b', properties=[lambda k: k.is_Symbol or k.is_Integer])\n c = sympy.Wild('c')\n d = sympy.Wild('d')\n e = sympy.Wild('e', properties=[lambda k: isinstance(k, sympy.Basic) and not isinstance(k, sympy.Atom)])\n int_ceil = sympy.Function('int_ceil')\n int_floor = sympy.Function('int_floor')\n\n processed = 1\n while processed > 0:\n processed = 0\n for ceil in nexpr.find(sympy.ceiling):\n # Simple ceiling\n m = ceil.match(sympy.ceiling(a / b))\n if m is not None:\n nexpr = nexpr.subs(ceil, int_ceil(m[a], m[b]))\n processed += 1\n continue\n # Ceiling of ceiling: \"ceil(ceil(c/d) / b)\"\n m = ceil.match(sympy.ceiling(int_ceil(c, d) / b))\n if m is not None:\n nexpr = nexpr.subs(ceil, int_ceil(int_ceil(m[c], m[d]), m[b]))\n processed += 1\n continue\n # Ceiling of ceiling: \"ceil(a / ceil(c/d))\"\n m = ceil.match(sympy.ceiling(a / int_ceil(c, d)))\n if m is not None:\n nexpr = nexpr.subs(ceil, int_ceil(m[a], int_ceil(m[c], m[d])))\n processed += 1\n continue\n # Match ceiling of multiplication with our custom integer functions\n m = ceil.match(sympy.ceiling(a * int_floor(c, d)))\n if m is not None:\n nexpr = nexpr.subs(ceil, m[a] * int_floor(m[c], m[d]))\n processed += 1\n continue\n m = ceil.match(sympy.ceiling(a * int_ceil(c, d)))\n if m is not None:\n nexpr = nexpr.subs(ceil, m[a] * int_ceil(m[c], m[d]))\n processed += 1\n continue\n # Ceiling with 
composite expression at the numerator\n m = ceil.match(sympy.ceiling(e / b))\n if m is not None:\n nexpr = nexpr.subs(ceil, int_ceil(m[e], m[b]))\n processed += 1\n continue\n for floor in nexpr.find(sympy.floor):\n # Simple floor\n m = floor.match(sympy.floor(a / b))\n if m is not None:\n nexpr = nexpr.subs(floor, int_floor(m[a], m[b]))\n processed += 1\n continue\n # Floor of floor: \"floor(floor(c/d) / b)\"\n m = floor.match(sympy.floor(int_floor(c, d) / b))\n if m is not None:\n nexpr = nexpr.subs(floor, int_floor(int_floor(m[c], m[d]),\n m[b]))\n processed += 1\n continue\n # Floor of floor: \"floor(a / floor(c/d))\"\n m = floor.match(sympy.floor(a / int_floor(c, d)))\n if m is not None:\n nexpr = nexpr.subs(floor, int_floor(m[a], int_floor(m[c],\n m[d])))\n processed += 1\n continue\n # floor with composite expression\n m = floor.match(sympy.floor(e / b))\n if m is not None:\n nexpr = nexpr.subs(floor, int_floor(m[e], m[b]))\n processed += 1\n continue\n return nexpr\n\n\ndef sympy_divide_fix(expr):\n \"\"\" Fix SymPy printouts where integer division such as \"tid/2\" turns\n into \".5*tid\".\n \"\"\"\n nexpr = expr\n if not isinstance(expr, sympy.Basic):\n return expr\n\n int_floor = sympy.Function('int_floor')\n\n processed = 1\n while processed > 0:\n processed = 0\n for candidate in nexpr.find(sympy.Mul):\n for i, arg in enumerate(candidate.args):\n if isinstance(arg, sympy.Number) and abs(arg) >= 1:\n continue\n if isinstance(arg, sympy.Number) and (1 / arg) == int(1 / arg):\n ri = i\n break\n else:\n continue\n nexpr = nexpr.subs(\n candidate,\n int_floor(\n sympy.Mul(*(candidate.args[:ri] + candidate.args[ri + 1:])),\n int(1 / candidate.args[ri])))\n processed += 1\n\n return nexpr\n\n\ndef simplify_ext(expr):\n \"\"\"\n An extended version of simplification with expression fixes for sympy.\n :param expr: A sympy expression.\n :return: Simplified version of the expression.\n \"\"\"\n if not isinstance(expr, sympy.Basic):\n return expr\n a = 
sympy.Wild('a')\n b = sympy.Wild('b')\n c = sympy.Wild('c')\n\n # Push expressions into both sides of min/max.\n # Example: Min(N, 4) + 1 => Min(N + 1, 5)\n dic = expr.match(sympy.Min(a, b) + c)\n if dic:\n return sympy.Min(dic[a] + dic[c], dic[b] + dic[c])\n dic = expr.match(sympy.Max(a, b) + c)\n if dic:\n return sympy.Max(dic[a] + dic[c], dic[b] + dic[c])\n return expr\n\n\nclass SympyBooleanConverter(ast.NodeTransformer):\n \"\"\" \n Replaces boolean operations with the appropriate SymPy functions to avoid\n non-symbolic evaluation.\n \"\"\"\n _ast_to_sympy_comparators = {\n ast.Eq: 'Eq',\n ast.Gt: 'Gt',\n ast.GtE: 'Ge',\n ast.Lt: 'Lt',\n ast.LtE: 'Le',\n ast.NotEq: 'Ne',\n # Python-specific\n ast.In: 'In',\n ast.Is: 'Is',\n ast.IsNot: 'IsNot',\n ast.NotIn: 'NotIn',\n }\n\n def visit_UnaryOp(self, node):\n if isinstance(node.op, ast.Not):\n func_node = ast.copy_location(\n ast.Name(id=type(node.op).__name__, ctx=ast.Load()), node)\n new_node = ast.Call(func=func_node,\n args=[self.visit(node.operand)],\n keywords=[])\n return ast.copy_location(new_node, node)\n return node\n\n def visit_BoolOp(self, node):\n func_node = ast.copy_location(\n ast.Name(id=type(node.op).__name__, ctx=ast.Load()), node)\n new_node = ast.Call(func=func_node,\n args=[self.visit(value) for value in node.values],\n keywords=[])\n return ast.copy_location(new_node, node)\n\n def visit_Compare(self, node: ast.Compare):\n if len(node.ops) > 1 or len(node.comparators) > 1:\n raise NotImplementedError\n op = node.ops[0]\n arguments = [node.left, node.comparators[0]]\n func_node = ast.copy_location(\n ast.Name(\n id=SympyBooleanConverter._ast_to_sympy_comparators[type(op)],\n ctx=ast.Load()), node)\n new_node = ast.Call(func=func_node,\n args=[self.visit(arg) for arg in arguments],\n keywords=[])\n return ast.copy_location(new_node, node)\n\n@lru_cache(2048)\ndef pystr_to_symbolic(expr, symbol_map=None, simplify=None):\n \"\"\" Takes a Python string and converts it into a symbolic expression. 
\"\"\"\n from dace.frontend.python.astutils import unparse # Avoid import loops\n\n if isinstance(expr, (SymExpr, sympy.Basic)):\n return expr\n if isinstance(expr, str) and dtypes.validate_name(expr):\n return symbol(expr)\n\n symbol_map = symbol_map or {}\n locals = {\n 'min': sympy.Min,\n 'max': sympy.Max,\n 'True': sympy.true,\n 'False': sympy.false,\n 'GtE': sympy.Ge,\n 'LtE': sympy.Le,\n 'NotEq': sympy.Ne,\n }\n # _clash1 enables all one-letter variables like N as symbols\n # _clash also allows pi, beta, zeta and other common greek letters\n locals.update(sympy.abc._clash)\n\n # Sympy processes \"not/and/or\" as direct evaluation. Replace with\n # And/Or(x, y), Not(x)\n if isinstance(expr, str) and re.search(r'\\bnot\\b|\\band\\b|\\bor\\b|==|!=',\n expr):\n expr = unparse(SympyBooleanConverter().visit(ast.parse(expr).body[0]))\n\n # TODO: support SymExpr over-approximated expressions\n try:\n return sympy_to_dace(sympy.sympify(expr, locals, evaluate=simplify),\n symbol_map)\n except TypeError: # Symbol object is not subscriptable\n # Replace subscript expressions with function calls\n expr = expr.replace('[', '(')\n expr = expr.replace(']', ')')\n return sympy_to_dace(sympy.sympify(expr, locals, evaluate=simplify),\n symbol_map)\n\n\nclass DaceSympyPrinter(sympy.printing.str.StrPrinter):\n \"\"\" Several notational corrections for integer math and C++ translation\n that sympy.printing.cxxcode does not provide. 
\"\"\"\n def _print_Float(self, expr):\n if int(expr) == expr:\n return str(int(expr))\n return super()._print_Float(expr)\n\n def _print_Function(self, expr):\n if str(expr.func) == 'int_floor':\n return '((%s) / (%s))' % (self._print(\n expr.args[0]), self._print(expr.args[1]))\n return super()._print_Function(expr)\n\n def _print_Mod(self, expr):\n return '((%s) %% (%s))' % (self._print(\n expr.args[0]), self._print(expr.args[1]))\n\n def _print_Equality(self, expr):\n return '((%s) == (%s))' % (self._print(\n expr.args[0]), self._print(expr.args[1]))\n\n def _print_Unequality(self, expr):\n return '((%s) != (%s))' % (self._print(\n expr.args[0]), self._print(expr.args[1]))\n\n def _print_Not(self, expr):\n return '(not (%s))' % self._print(expr.args[0])\n\n\ndef symstr(sym):\n \"\"\" Convert a symbolic expression to a C++ compilable expression. \"\"\"\n def repstr(s):\n return s.replace('Min', 'min').replace('Max', 'max')\n\n if isinstance(sym, SymExpr):\n return symstr(sym.expr)\n\n try:\n sym = sympy_numeric_fix(sym)\n sym = sympy_intdiv_fix(sym)\n sym = sympy_divide_fix(sym)\n\n sstr = DaceSympyPrinter().doprint(sym)\n\n if isinstance(sym,\n symbol) or isinstance(sym, sympy.Symbol) or isinstance(\n sym, sympy.Number) or dtypes.isconstant(sym):\n return repstr(sstr)\n else:\n return '(' + repstr(sstr) + ')'\n except (AttributeError, TypeError, ValueError):\n sstr = DaceSympyPrinter().doprint(sym)\n return '(' + repstr(sstr) + ')'\n\n\ndef _spickle(obj):\n return str(obj), {\n s.name: (s.dtype, s._assumptions)\n for s in symlist(obj).values()\n }\n\n\ndef _sunpickle(obj):\n s, slist = obj\n # Create symbols\n for sname, (stype, assumptions) in slist.items():\n symbol(sname, stype, **assumptions)\n return pystr_to_symbolic(s)\n\n\nclass SympyAwarePickler(pickle.Pickler):\n \"\"\" Custom Pickler class that safely saves SymPy expressions\n with function definitions in expressions (e.g., int_ceil).\n \"\"\"\n def persistent_id(self, obj):\n if isinstance(obj, 
sympy.Basic):\n # Save sympy expression as srepr\n return (\"DaCeSympyExpression\", _spickle(obj))\n else:\n # Pickle everything else normally\n return None\n\n\nclass SympyAwareUnpickler(pickle.Unpickler):\n \"\"\" Custom Unpickler class that safely restores SymPy expressions\n with function definitions in expressions (e.g., int_ceil).\n \"\"\"\n def persistent_load(self, pid):\n type_tag, value = pid\n if type_tag == \"DaCeSympyExpression\":\n return _sunpickle(value)\n else:\n raise pickle.UnpicklingError(\"unsupported persistent object\")\n",
"# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\n\"\"\" A module that contains various DaCe type definitions. \"\"\"\nfrom __future__ import print_function\nimport ctypes\nimport aenum\nimport inspect\nimport numpy\nimport re\nfrom functools import wraps\nfrom typing import Any\nfrom dace.config import Config\nfrom dace.registry import extensible_enum\n\n\n@extensible_enum\nclass StorageType(aenum.AutoNumberEnum):\n \"\"\" Available data storage types in the SDFG. \"\"\"\n\n Default = () #: Scope-default storage location\n Register = () #: Local data on registers, stack, or equivalent memory\n CPU_Pinned = () #: Host memory that can be DMA-accessed from accelerators\n CPU_Heap = () #: Host memory allocated on heap\n CPU_ThreadLocal = () #: Thread-local host memory\n GPU_Global = () #: Global memory\n GPU_Shared = () #: Shared memory\n FPGA_Global = () #: Off-chip global memory (DRAM)\n FPGA_Local = () #: On-chip memory (bulk storage)\n FPGA_Registers = () #: On-chip memory (fully partitioned registers)\n FPGA_ShiftRegister = () #: Only accessible at constant indices\n\n\n@extensible_enum\nclass ScheduleType(aenum.AutoNumberEnum):\n \"\"\" Available map schedule types in the SDFG. \"\"\"\n # TODO: Address different targets w.r.t. sequential\n # TODO: Add per-type properties for scope nodes. Consider TargetType enum\n # and a MapScheduler class\n\n Default = () #: Scope-default parallel schedule\n Sequential = () #: Sequential code (single-thread)\n MPI = () #: MPI processes\n CPU_Multicore = () #: OpenMP\n Unrolled = ()\n\n #: Default scope schedule for GPU code. 
Specializes to schedule GPU_Device and GPU_Global during inference.\n GPU_Default = ()\n GPU_Device = () #: Kernel\n GPU_ThreadBlock = () #: Thread-block code\n GPU_ThreadBlock_Dynamic = () #: Allows rescheduling work within a block\n GPU_Persistent = ()\n FPGA_Device = ()\n\n\n# A subset of GPU schedule types\nGPU_SCHEDULES = [\n ScheduleType.GPU_Device,\n ScheduleType.GPU_ThreadBlock,\n ScheduleType.GPU_ThreadBlock_Dynamic,\n ScheduleType.GPU_Persistent,\n]\n\n\nclass ReductionType(aenum.AutoNumberEnum):\n \"\"\" Reduction types natively supported by the SDFG compiler. \"\"\"\n\n Custom = () #: Defined by an arbitrary lambda function\n Min = () #: Minimum value\n Max = () #: Maximum value\n Sum = () #: Sum\n Product = () #: Product\n Logical_And = () #: Logical AND (&&)\n Bitwise_And = () #: Bitwise AND (&)\n Logical_Or = () #: Logical OR (||)\n Bitwise_Or = () #: Bitwise OR (|)\n Logical_Xor = () #: Logical XOR (!=)\n Bitwise_Xor = () #: Bitwise XOR (^)\n Min_Location = () #: Minimum value and its location\n Max_Location = () #: Maximum value and its location\n Exchange = () #: Set new value, return old value\n\n # Only supported in OpenMP\n Sub = () #: Subtraction (only supported in OpenMP)\n Div = () #: Division (only supported in OpenMP)\n\n\n@extensible_enum\nclass AllocationLifetime(aenum.AutoNumberEnum):\n \"\"\" Options for allocation span (when to allocate/deallocate) of data. \"\"\"\n\n Scope = () #: Allocated/Deallocated on innermost scope start/end\n State = () #: Allocated throughout the containing state\n SDFG = () #: Allocated throughout the innermost SDFG (possibly nested)\n Global = () #: Allocated throughout the entire program (outer SDFG)\n Persistent = () #: Allocated throughout multiple invocations (init/exit)\n\n\n@extensible_enum\nclass Language(aenum.AutoNumberEnum):\n \"\"\" Available programming languages for SDFG tasklets. 
\"\"\"\n\n Python = ()\n CPP = ()\n OpenCL = ()\n SystemVerilog = ()\n\n\nclass AccessType(aenum.AutoNumberEnum):\n \"\"\" Types of access to an `AccessNode`. \"\"\"\n\n ReadOnly = ()\n WriteOnly = ()\n ReadWrite = ()\n\n\n@extensible_enum\nclass InstrumentationType(aenum.AutoNumberEnum):\n \"\"\" Types of instrumentation providers.\n @note: Might be determined automatically in future versions.\n \"\"\"\n\n No_Instrumentation = ()\n Timer = ()\n PAPI_Counters = ()\n GPU_Events = ()\n\n@extensible_enum\nclass TilingType(aenum.AutoNumberEnum):\n \"\"\" Available tiling types in a `StripMining` transformation. \"\"\"\n\n Normal = ()\n CeilRange = ()\n NumberOfTiles = ()\n\n\n# Maps from ScheduleType to default StorageType\nSCOPEDEFAULT_STORAGE = {\n None: StorageType.CPU_Heap,\n ScheduleType.Sequential: StorageType.Register,\n ScheduleType.MPI: StorageType.CPU_Heap,\n ScheduleType.CPU_Multicore: StorageType.Register,\n ScheduleType.GPU_Default: StorageType.GPU_Global,\n ScheduleType.GPU_Persistent: StorageType.GPU_Global,\n ScheduleType.GPU_Device: StorageType.GPU_Shared,\n ScheduleType.GPU_ThreadBlock: StorageType.Register,\n ScheduleType.GPU_ThreadBlock_Dynamic: StorageType.Register,\n ScheduleType.FPGA_Device: StorageType.FPGA_Global,\n}\n\n# Maps from ScheduleType to default ScheduleType for sub-scopes\nSCOPEDEFAULT_SCHEDULE = {\n None: ScheduleType.CPU_Multicore,\n ScheduleType.Sequential: ScheduleType.Sequential,\n ScheduleType.MPI: ScheduleType.CPU_Multicore,\n ScheduleType.CPU_Multicore: ScheduleType.Sequential,\n ScheduleType.Unrolled: ScheduleType.CPU_Multicore,\n ScheduleType.GPU_Default: ScheduleType.GPU_Device,\n ScheduleType.GPU_Persistent: ScheduleType.GPU_Device,\n ScheduleType.GPU_Device: ScheduleType.GPU_ThreadBlock,\n ScheduleType.GPU_ThreadBlock: ScheduleType.Sequential,\n ScheduleType.GPU_ThreadBlock_Dynamic: ScheduleType.Sequential,\n ScheduleType.FPGA_Device: ScheduleType.FPGA_Device,\n}\n\n# Translation of types to C types\n_CTYPES = {\n None: 
\"void\",\n int: \"int\",\n float: \"float\",\n complex: \"dace::complex64\",\n bool: \"bool\",\n numpy.bool: \"bool\",\n numpy.bool_: \"bool\",\n numpy.int8: \"char\",\n numpy.int16: \"short\",\n numpy.int32: \"int\",\n numpy.int64: \"long long\",\n numpy.uint8: \"unsigned char\",\n numpy.uint16: \"unsigned short\",\n numpy.uint32: \"unsigned int\",\n numpy.uint64: \"unsigned long long\",\n numpy.float16: \"dace::float16\",\n numpy.float32: \"float\",\n numpy.float64: \"double\",\n numpy.complex64: \"dace::complex64\",\n numpy.complex128: \"dace::complex128\",\n}\n\n# Translation of types to OpenCL types\n_OCL_TYPES = {\n None: \"void\",\n int: \"int\",\n float: \"float\",\n bool: \"bool\",\n numpy.bool: \"bool\",\n numpy.bool_: \"bool\",\n numpy.int8: \"char\",\n numpy.int16: \"short\",\n numpy.int32: \"int\",\n numpy.int64: \"long long\",\n numpy.uint8: \"unsigned char\",\n numpy.uint16: \"unsigned short\",\n numpy.uint32: \"unsigned int\",\n numpy.uint64: \"unsigned long long\",\n numpy.float32: \"float\",\n numpy.float64: \"double\",\n numpy.complex64: \"complex float\",\n numpy.complex128: \"complex double\",\n}\n\n# Translation of types to OpenCL vector types\n_OCL_VECTOR_TYPES = {\n numpy.int8: \"char\",\n numpy.uint8: \"uchar\",\n numpy.int16: \"short\",\n numpy.uint16: \"ushort\",\n numpy.int32: \"int\",\n numpy.uint32: \"uint\",\n numpy.int64: \"long\",\n numpy.uint64: \"ulong\",\n numpy.float16: \"half\",\n numpy.float32: \"float\",\n numpy.float64: \"double\",\n numpy.complex64: \"complex float\",\n numpy.complex128: \"complex double\",\n}\n\n# Translation of types to ctypes types\n_FFI_CTYPES = {\n None: ctypes.c_void_p,\n int: ctypes.c_int,\n float: ctypes.c_float,\n complex: ctypes.c_uint64,\n bool: ctypes.c_bool,\n numpy.bool: ctypes.c_bool,\n numpy.bool_: ctypes.c_bool,\n numpy.int8: ctypes.c_int8,\n numpy.int16: ctypes.c_int16,\n numpy.int32: ctypes.c_int32,\n numpy.int64: ctypes.c_int64,\n numpy.uint8: ctypes.c_uint8,\n numpy.uint16: 
ctypes.c_uint16,\n numpy.uint32: ctypes.c_uint32,\n numpy.uint64: ctypes.c_uint64,\n numpy.float16: ctypes.c_uint16,\n numpy.float32: ctypes.c_float,\n numpy.float64: ctypes.c_double,\n numpy.complex64: ctypes.c_uint64,\n numpy.complex128: ctypes.c_longdouble,\n}\n\n# Number of bytes per data type\n_BYTES = {\n None: 0,\n int: 4,\n float: 4,\n complex: 8,\n bool: 1,\n numpy.bool: 1,\n numpy.bool_: 1,\n numpy.int8: 1,\n numpy.int16: 2,\n numpy.int32: 4,\n numpy.int64: 8,\n numpy.uint8: 1,\n numpy.uint16: 2,\n numpy.uint32: 4,\n numpy.uint64: 8,\n numpy.float16: 2,\n numpy.float32: 4,\n numpy.float64: 8,\n numpy.complex64: 8,\n numpy.complex128: 16,\n}\n\n\nclass typeclass(object):\n \"\"\" An extension of types that enables their use in DaCe.\n\n These types are defined for three reasons:\n 1. Controlling DaCe types\n 2. Enabling declaration syntax: `dace.float32[M,N]`\n 3. Enabling extensions such as `dace.struct` and `dace.vector`\n \"\"\"\n def __init__(self, wrapped_type):\n # Convert python basic types\n if isinstance(wrapped_type, str):\n try:\n wrapped_type = getattr(numpy, wrapped_type)\n except AttributeError:\n raise ValueError(\"Unknown type: {}\".format(wrapped_type))\n\n config_data_types = Config.get('compiler', 'default_data_types')\n if wrapped_type is int:\n if config_data_types.lower() == 'python':\n wrapped_type = numpy.int64\n elif config_data_types.lower() == 'c':\n wrapped_type = numpy.int32\n else:\n raise NameError(\n \"Unknown configuration for default_data_types: {}\".format(\n config_data_types))\n elif wrapped_type is float:\n if config_data_types.lower() == 'python':\n wrapped_type = numpy.float64\n elif config_data_types.lower() == 'c':\n wrapped_type = numpy.float32\n else:\n raise NameError(\n \"Unknown configuration for default_data_types: {}\".format(\n config_data_types))\n elif wrapped_type is complex:\n if config_data_types.lower() == 'python':\n wrapped_type = numpy.complex128\n elif config_data_types.lower() == 'c':\n 
wrapped_type = numpy.complex64\n else:\n raise NameError(\n \"Unknown configuration for default_data_types: {}\".format(\n config_data_types))\n\n self.type = wrapped_type # Type in Python\n self.ctype = _CTYPES[wrapped_type] # Type in C\n self.ctype_unaligned = self.ctype # Type in C (without alignment)\n self.dtype = self # For compatibility support with numpy\n self.bytes = _BYTES[wrapped_type] # Number of bytes for this type\n\n def __hash__(self):\n return hash((self.type, self.ctype))\n\n def to_string(self):\n \"\"\" A Numpy-like string-representation of the underlying data type. \"\"\"\n return self.type.__name__\n\n def as_ctypes(self):\n \"\"\" Returns the ctypes version of the typeclass. \"\"\"\n return _FFI_CTYPES[self.type]\n\n def as_numpy_dtype(self):\n return numpy.dtype(self.type)\n\n def is_complex(self):\n if self.type == numpy.complex64 or self.type == numpy.complex128:\n return True\n return False\n\n def to_json(self):\n if self.type is None:\n return None\n return self.type.__name__\n\n @staticmethod\n def from_json(json_obj, context=None):\n if json_obj is None:\n return typeclass(None)\n return json_to_typeclass(json_obj, context)\n\n # Create a new type\n def __call__(self, *args, **kwargs):\n return self.type(*args, **kwargs)\n\n def __eq__(self, other):\n return other is not None and self.ctype == other.ctype\n\n def __ne__(self, other):\n return other is not None and self.ctype != other.ctype\n\n def __getitem__(self, s):\n \"\"\" This is syntactic sugar that allows us to define an array type\n with the following syntax: dace.uint32[N,M]\n :return: A data.Array data descriptor.\n \"\"\"\n from dace import data\n\n if isinstance(s, list) or isinstance(s, tuple):\n return data.Array(self, tuple(s))\n return data.Array(self, (s, ))\n\n def __repr__(self):\n return self.ctype\n\n @property\n def base_type(self):\n return self\n\n @property\n def veclen(self):\n return 1\n\n @property\n def ocltype(self):\n return _OCL_TYPES[self.type]\n\n 
def as_arg(self, name):\n return self.ctype + ' ' + name\n\n\ndef max_value(dtype: typeclass):\n \"\"\"Get a max value literal for `dtype`.\"\"\"\n nptype = dtype.as_numpy_dtype()\n if nptype == numpy.bool:\n return True\n elif numpy.issubdtype(nptype, numpy.integer):\n return numpy.iinfo(nptype).max\n elif numpy.issubdtype(nptype, numpy.floating):\n return numpy.finfo(nptype).max\n\n raise TypeError('Unsupported type \"%s\" for maximum' % dtype)\n\n\ndef min_value(dtype: typeclass):\n \"\"\"Get a min value literal for `dtype`.\"\"\"\n nptype = dtype.as_numpy_dtype()\n if nptype == numpy.bool:\n return False\n elif numpy.issubdtype(nptype, numpy.integer):\n return numpy.iinfo(nptype).min\n elif numpy.issubdtype(nptype, numpy.floating):\n return numpy.finfo(nptype).min\n\n raise TypeError('Unsupported type \"%s\" for minimum' % dtype)\n\n\ndef result_type_of(lhs, *rhs):\n \"\"\"\n Returns the largest between two or more types (dace.types.typeclass)\n according to C semantics.\n \"\"\"\n if len(rhs) == 0:\n rhs = None\n elif len(rhs) > 1:\n result = lhs\n for r in rhs:\n result = result_type_of(result, r)\n return result\n\n rhs = rhs[0]\n\n # Extract the type if symbolic or data\n from dace.data import Data\n lhs = lhs.dtype if (type(lhs).__name__ == 'symbol'\n or isinstance(lhs, Data)) else lhs\n rhs = rhs.dtype if (type(rhs).__name__ == 'symbol'\n or isinstance(rhs, Data)) else rhs\n\n if lhs == rhs:\n return lhs # Types are the same, return either\n if lhs is None or lhs.type is None:\n return rhs # Use RHS even if it's None\n if rhs is None or rhs.type is None:\n return lhs # Use LHS\n\n # Vector types take precedence, largest vector size first\n if isinstance(lhs, vector) and not isinstance(rhs, vector):\n return lhs\n elif not isinstance(lhs, vector) and isinstance(rhs, vector):\n return rhs\n elif isinstance(lhs, vector) and isinstance(rhs, vector):\n if lhs.veclen == rhs.veclen:\n return vector(result_type_of(lhs.vtype, rhs.vtype), lhs.veclen)\n return lhs 
if lhs.veclen > rhs.veclen else rhs\n\n # Extract the numpy type so we can call issubdtype on them\n lhs_ = lhs.type if isinstance(lhs, typeclass) else lhs\n rhs_ = rhs.type if isinstance(rhs, typeclass) else rhs\n # Extract data sizes (seems the type itself doesn't expose this)\n size_lhs = lhs_(0).itemsize\n size_rhs = rhs_(0).itemsize\n # Both are integers\n if numpy.issubdtype(lhs_, numpy.integer) and numpy.issubdtype(\n rhs_, numpy.integer):\n # If one byte width is larger, use it\n if size_lhs > size_rhs:\n return lhs\n elif size_lhs < size_rhs:\n return rhs\n # Sizes are the same\n if numpy.issubdtype(lhs_, numpy.unsignedinteger):\n # No matter if right is signed or not, we must return unsigned\n return lhs\n else:\n # Left is signed, so either right is unsigned and we return that,\n # or both are signed\n return rhs\n # At least one side is a floating point number\n if numpy.issubdtype(lhs_, numpy.integer):\n return rhs\n if numpy.issubdtype(rhs_, numpy.integer):\n return lhs\n # Both sides are floating point numbers\n if size_lhs > size_rhs:\n return lhs\n return rhs # RHS is bigger\n\n\nclass pointer(typeclass):\n \"\"\" A data type for a pointer to an existing typeclass.\n\n Example use:\n `dace.pointer(dace.struct(x=dace.float32, y=dace.float32))`. \"\"\"\n def __init__(self, wrapped_typeclass):\n self._typeclass = wrapped_typeclass\n self.type = wrapped_typeclass.type\n self.bytes = int64.bytes\n self.ctype = wrapped_typeclass.ctype + \"*\"\n self.ctype_unaligned = wrapped_typeclass.ctype_unaligned + \"*\"\n self.dtype = self\n\n def to_json(self):\n return {'type': 'pointer', 'dtype': self._typeclass.to_json()}\n\n @staticmethod\n def from_json(json_obj, context=None):\n if json_obj['type'] != 'pointer':\n raise TypeError(\"Invalid type for pointer\")\n\n return pointer(json_to_typeclass(json_obj['dtype'], context))\n\n def as_ctypes(self):\n \"\"\" Returns the ctypes version of the typeclass. 
\"\"\"\n return ctypes.POINTER(_FFI_CTYPES[self.type])\n\n def as_numpy_dtype(self):\n return numpy.dtype(self.as_ctypes())\n\n @property\n def base_type(self):\n return self._typeclass\n\n @property\n def ocltype(self):\n return f\"{self.base_type.ocltype}*\"\n\n\nclass vector(typeclass):\n \"\"\"\n A data type for a vector-type of an existing typeclass.\n\n Example use: `dace.vector(dace.float32, 4)` becomes float4.\n \"\"\"\n def __init__(self, dtype: typeclass, vector_length: int):\n self.vtype = dtype\n self.type = dtype.type\n self._veclen = vector_length\n self.bytes = dtype.bytes * vector_length\n self.dtype = self\n\n def to_json(self):\n return {\n 'type': 'vector',\n 'dtype': self.vtype.to_json(),\n 'elements': str(self.veclen)\n }\n\n @staticmethod\n def from_json(json_obj, context=None):\n from dace.symbolic import pystr_to_symbolic\n return vector(json_to_typeclass(json_obj['dtype'], context),\n pystr_to_symbolic(json_obj['elements']))\n\n @property\n def ctype(self):\n return \"dace::vec<%s, %s>\" % (self.vtype.ctype, self.veclen)\n\n @property\n def ocltype(self):\n if self.veclen > 1:\n vectype = _OCL_VECTOR_TYPES[self.type]\n return f\"{vectype}{self.veclen}\"\n else:\n return self.base_type.ocltype\n\n @property\n def ctype_unaligned(self):\n return self.ctype\n\n def as_ctypes(self):\n \"\"\" Returns the ctypes version of the typeclass. 
\"\"\"\n return _FFI_CTYPES[self.type] * self.veclen\n\n def as_numpy_dtype(self):\n return numpy.dtype(self.as_ctypes())\n\n @property\n def base_type(self):\n return self.vtype\n\n @property\n def veclen(self):\n return self._veclen\n\n @veclen.setter\n def veclen(self, val):\n self._veclen = val\n\n\nclass struct(typeclass):\n \"\"\" A data type for a struct of existing typeclasses.\n\n Example use: `dace.struct(a=dace.int32, b=dace.float64)`.\n \"\"\"\n def __init__(self, name, **fields_and_types):\n # self._data = fields_and_types\n self.type = ctypes.Structure\n self.name = name\n # TODO: Assuming no alignment! Get from ctypes\n # self.bytes = sum(t.bytes for t in fields_and_types.values())\n self.ctype = name\n self.ctype_unaligned = name\n self.dtype = self\n self._parse_field_and_types(**fields_and_types)\n\n @property\n def fields(self):\n return self._data\n\n def to_json(self):\n return {\n 'type': 'struct',\n 'name': self.name,\n 'data': {k: v.to_json()\n for k, v in self._data.items()},\n 'length': {k: v\n for k, v in self._length.items()},\n 'bytes': self.bytes\n }\n\n @staticmethod\n def from_json(json_obj, context=None):\n if json_obj['type'] != \"struct\":\n raise TypeError(\"Invalid type for struct\")\n\n import dace.serialize # Avoid import loop\n\n ret = struct(json_obj['name'])\n ret._data = {\n k: json_to_typeclass(v, context)\n for k, v in json_obj['data'].items()\n }\n ret._length = {k: v for k, v in json_obj['length'].items()}\n ret.bytes = json_obj['bytes']\n\n return ret\n\n def _parse_field_and_types(self, **fields_and_types):\n self._data = dict()\n self._length = dict()\n self.bytes = 0\n for k, v in fields_and_types.items():\n if isinstance(v, tuple):\n t, l = v\n if not isinstance(t, pointer):\n raise TypeError(\"Only pointer types may have a length.\")\n if l not in fields_and_types.keys():\n raise ValueError(\n \"Length {} not a field of struct {}\".format(\n l, self.name))\n self._data[k] = t\n self._length[k] = l\n self.bytes += 
t.bytes\n else:\n if isinstance(v, pointer):\n raise TypeError(\"Pointer types must have a length.\")\n self._data[k] = v\n self.bytes += v.bytes\n\n def as_ctypes(self):\n \"\"\" Returns the ctypes version of the typeclass. \"\"\"\n # Populate the ctype fields for the struct class.\n fields = []\n for k, v in self._data.items():\n if isinstance(v, pointer):\n fields.append(\n (k,\n ctypes.c_void_p)) # ctypes.POINTER(_FFI_CTYPES[v.type])))\n else:\n fields.append((k, _FFI_CTYPES[v.type]))\n fields = sorted(fields, key=lambda f: f[0])\n # Create new struct class.\n struct_class = type(\"NewStructClass\", (ctypes.Structure, ),\n {\"_fields_\": fields})\n return struct_class\n\n def as_numpy_dtype(self):\n return numpy.dtype(self.as_ctypes())\n\n def emit_definition(self):\n return \"\"\"struct {name} {{\n{typ}\n}};\"\"\".format(\n name=self.name,\n typ='\\n'.join([\n \" %s %s;\" % (t.ctype, tname)\n for tname, t in sorted(self._data.items())\n ]),\n )\n\n\n####### Utility function ##############\ndef ptrtonumpy(ptr, inner_ctype, shape):\n import ctypes\n import numpy as np\n return np.ctypeslib.as_array(\n ctypes.cast(ctypes.c_void_p(ptr), ctypes.POINTER(inner_ctype)), shape)\n\n\ndef _atomic_counter_generator():\n ctr = 0\n while True:\n ctr += 1\n yield ctr\n\n\nclass callback(typeclass):\n \"\"\" Looks like dace.callback([None, <some_native_type>], *types)\"\"\"\n def __init__(self, return_type, *variadic_args):\n self.uid = next(_atomic_counter_generator())\n from dace import data\n if isinstance(return_type, data.Array):\n raise TypeError(\"Callbacks that return arrays are \"\n \"not supported as per SDFG semantics\")\n self.dtype = self\n self.return_type = return_type\n self.input_types = []\n for arg in variadic_args:\n if isinstance(arg, typeclass):\n pass\n elif isinstance(arg, data.Data):\n pass\n elif isinstance(arg, str):\n arg = json_to_typeclass(arg)\n else:\n raise TypeError(\"Cannot resolve type from: {}\".format(arg))\n 
self.input_types.append(arg)\n self.bytes = int64.bytes\n self.type = self\n self.ctype = self\n\n def as_ctypes(self):\n \"\"\" Returns the ctypes version of the typeclass. \"\"\"\n from dace import data\n\n return_ctype = (self.return_type.as_ctypes()\n if self.return_type is not None else None)\n input_ctypes = []\n for some_arg in self.input_types:\n if isinstance(some_arg, data.Array):\n input_ctypes.append(ctypes.c_void_p)\n else:\n input_ctypes.append(\n some_arg.as_ctypes() if some_arg is not None else None)\n if input_ctypes == [None]:\n input_ctypes = []\n cf_object = ctypes.CFUNCTYPE(return_ctype, *input_ctypes)\n return cf_object\n\n def as_numpy_dtype(self):\n return numpy.dtype(self.as_ctypes())\n\n def as_arg(self, name):\n from dace import data\n\n return_type_cstring = (self.return_type.ctype\n if self.return_type is not None else \"void\")\n input_type_cstring = []\n for arg in self.input_types:\n if isinstance(arg, data.Array):\n # const hack needed to prevent error in casting const int* to int*\n input_type_cstring.append(arg.dtype.ctype + \" const *\")\n else:\n input_type_cstring.append(arg.ctype if arg is not None else \"\")\n cstring = return_type_cstring + \" \" + \"(*\" + name + \")(\"\n for index, inp_arg in enumerate(input_type_cstring):\n if index > 0:\n cstring = cstring + \",\"\n cstring = cstring + inp_arg\n cstring = cstring + \")\"\n return cstring\n\n def get_trampoline(self, pyfunc, other_arguments):\n from functools import partial\n from dace import data, symbolic\n\n arraypos = []\n types_and_sizes = []\n for index, arg in enumerate(self.input_types):\n if isinstance(arg, data.Array):\n arraypos.append(index)\n types_and_sizes.append((arg.dtype.as_ctypes(), arg.shape))\n if len(arraypos) == 0:\n return pyfunc\n\n def trampoline(orig_function, indices, data_types_and_sizes,\n *other_inputs):\n list_of_other_inputs = list(other_inputs)\n for i in indices:\n data_type, size = data_types_and_sizes[i]\n non_symbolic_sizes = []\n for 
s in size:\n if isinstance(s, symbolic.symbol):\n non_symbolic_sizes.append(other_arguments[str(s)])\n else:\n non_symbolic_sizes.append(s)\n list_of_other_inputs[i] = ptrtonumpy(other_inputs[i], data_type,\n non_symbolic_sizes)\n return orig_function(*list_of_other_inputs)\n\n return partial(trampoline, pyfunc, arraypos, types_and_sizes)\n\n def __hash__(self):\n return hash((self.uid, self.return_type, *self.input_types))\n\n def to_json(self):\n return {\n 'type': 'callback',\n 'arguments': [i.to_json() for i in self.input_types],\n 'returntype':\n self.return_type.to_json() if self.return_type else None\n }\n\n @staticmethod\n def from_json(json_obj, context=None):\n if json_obj['type'] != \"callback\":\n raise TypeError(\"Invalid type for callback\")\n\n rettype = json_obj['returntype']\n\n import dace.serialize # Avoid import loop\n\n return callback(\n json_to_typeclass(rettype) if rettype else None,\n *(dace.serialize.from_json(arg, context)\n for arg in json_obj['arguments']))\n\n def __str__(self):\n return \"dace.callback\"\n\n def __repr__(self):\n return \"dace.callback\"\n\n def __eq__(self, other):\n if not isinstance(other, callback):\n return False\n return self.uid == other.uid\n\n def __ne__(self, other):\n return not self.__eq__(other)\n\n\n# Helper function to determine whether a global variable is a constant\n_CONSTANT_TYPES = [\n type(None),\n int,\n float,\n complex,\n str,\n bool,\n numpy.bool_,\n numpy.intc,\n numpy.intp,\n numpy.int8,\n numpy.int16,\n numpy.int32,\n numpy.int64,\n numpy.uint8,\n numpy.uint16,\n numpy.uint32,\n numpy.uint64,\n numpy.float16,\n numpy.float32,\n numpy.float64,\n numpy.complex64,\n numpy.complex128,\n typeclass, # , type\n]\n\n\ndef isconstant(var):\n \"\"\" Returns True if a variable is designated a constant (i.e., that can be\n directly generated in code).\n \"\"\"\n return type(var) in _CONSTANT_TYPES\n\n\nbool = typeclass(numpy.bool)\nbool_ = typeclass(numpy.bool_)\nint8 = typeclass(numpy.int8)\nint16 = 
typeclass(numpy.int16)\nint32 = typeclass(numpy.int32)\nint64 = typeclass(numpy.int64)\nuint8 = typeclass(numpy.uint8)\nuint16 = typeclass(numpy.uint16)\nuint32 = typeclass(numpy.uint32)\nuint64 = typeclass(numpy.uint64)\nfloat16 = typeclass(numpy.float16)\nfloat32 = typeclass(numpy.float32)\nfloat64 = typeclass(numpy.float64)\ncomplex64 = typeclass(numpy.complex64)\ncomplex128 = typeclass(numpy.complex128)\n\nDTYPE_TO_TYPECLASS = {\n int: typeclass(int),\n float: typeclass(float),\n complex: typeclass(complex),\n numpy.bool: bool,\n numpy.bool_: bool_,\n numpy.int8: int8,\n numpy.int16: int16,\n numpy.int32: int32,\n numpy.int64: int64,\n numpy.uint8: uint8,\n numpy.uint16: uint16,\n numpy.uint32: uint32,\n numpy.uint64: uint64,\n numpy.float16: float16,\n numpy.float32: float32,\n numpy.float64: float64,\n numpy.complex64: complex64,\n numpy.complex128: complex128,\n # FIXME\n numpy.longlong: int64,\n numpy.ulonglong: uint64\n}\n\nTYPECLASS_TO_STRING = {\n bool: \"dace::bool\",\n bool_: \"dace::bool_\",\n uint8: \"dace::uint8\",\n uint16: \"dace::uint16\",\n uint32: \"dace::uint32\",\n uint64: \"dace::uint64\",\n int8: \"dace::int8\",\n int16: \"dace::int16\",\n int32: \"dace::int32\",\n int64: \"dace::int64\",\n float16: \"dace::float16\",\n float32: \"dace::float32\",\n float64: \"dace::float64\",\n complex64: \"dace::complex64\",\n complex128: \"dace::complex128\"\n}\n\nTYPECLASS_STRINGS = [\n \"int\",\n \"float\",\n \"complex\",\n \"bool\",\n \"bool_\",\n \"int8\",\n \"int16\",\n \"int32\",\n \"int64\",\n \"uint8\",\n \"uint16\",\n \"uint32\",\n \"uint64\",\n \"float16\",\n \"float32\",\n \"float64\",\n \"complex64\",\n \"complex128\"\n]\n\nINTEGER_TYPES = [\n bool,\n bool_,\n int8,\n int16,\n int32,\n int64,\n uint8,\n uint16,\n uint32,\n uint64\n]\n\n#######################################################\n# Allowed types\n\n# Lists allowed modules and maps them to C++ namespaces for code generation\n_ALLOWED_MODULES = {\n \"builtins\": \"\",\n \"dace\": 
\"dace::\",\n \"math\": \"dace::math::\",\n \"cmath\": \"dace::cmath::\",\n}\n\n# Lists allowed modules and maps them to OpenCL\n_OPENCL_ALLOWED_MODULES = {\"builtins\": \"\", \"dace\": \"\", \"math\": \"\"}\n\n\ndef ismodule(var):\n \"\"\" Returns True if a given object is a module. \"\"\"\n return inspect.ismodule(var)\n\n\ndef ismoduleallowed(var):\n \"\"\" Helper function to determine the source module of an object, and\n whether it is allowed in DaCe programs. \"\"\"\n mod = inspect.getmodule(var)\n try:\n for m in _ALLOWED_MODULES:\n if mod.__name__ == m or mod.__name__.startswith(m + \".\"):\n return True\n except AttributeError:\n return False\n return False\n\n\ndef ismodule_and_allowed(var):\n \"\"\" Returns True if a given object is a module and is one of the allowed\n modules in DaCe programs. \"\"\"\n if inspect.ismodule(var):\n if var.__name__ in _ALLOWED_MODULES:\n return True\n return False\n\n\ndef isallowed(var, allow_recursive=False):\n \"\"\" Returns True if a given object is allowed in a DaCe program.\n\n :param allow_recursive: whether to allow dicts or lists containing constants.\n \"\"\"\n from dace.symbolic import issymbolic\n\n if allow_recursive:\n if isinstance(var, (list, tuple)):\n return all(isallowed(v, allow_recursive=False) for v in var)\n\n return isconstant(var) or ismodule(var) or issymbolic(\n var) or isinstance(var, typeclass)\n\nclass DebugInfo:\n \"\"\" Source code location identifier of a node/edge in an SDFG. Used for\n IDE and debugging purposes. 
\"\"\"\n def __init__(self,\n start_line,\n start_column=0,\n end_line=-1,\n end_column=0,\n filename=None):\n self.start_line = start_line\n self.end_line = end_line if end_line >= 0 else start_line\n self.start_column = start_column\n self.end_column = end_column\n self.filename = filename\n\n # NOTE: Manually marking as serializable to avoid an import loop\n # The data structure is a property on its own (pointing to a range of code),\n # so it is serialized as a dictionary directly.\n def to_json(self):\n return dict(type='DebugInfo',\n start_line=self.start_line,\n end_line=self.end_line,\n start_column=self.start_column,\n end_column=self.end_column,\n filename=self.filename)\n\n @staticmethod\n def from_json(json_obj, context=None):\n return DebugInfo(json_obj['start_line'], json_obj['start_column'],\n json_obj['end_line'], json_obj['end_column'],\n json_obj['filename'])\n\n\n######################################################\n# Static (utility) functions\n\n\ndef json_to_typeclass(obj, context=None):\n # TODO: this does two different things at the same time. Should be split\n # into two separate functions.\n from dace.serialize import get_serializer\n if isinstance(obj, str):\n return get_serializer(obj)\n elif isinstance(obj, dict) and \"type\" in obj:\n return get_serializer(obj[\"type\"]).from_json(obj, context)\n else:\n raise ValueError(\"Cannot resolve: {}\".format(obj))\n\n\ndef paramdec(dec):\n \"\"\" Parameterized decorator meta-decorator. Enables using `@decorator`,\n `@decorator()`, and `@decorator(...)` with the same function. 
\"\"\"\n @wraps(dec)\n def layer(*args, **kwargs):\n\n # Allows the use of @decorator, @decorator(), and @decorator(...)\n if len(kwargs) == 0 and len(args) == 1 and callable(\n args[0]) and not isinstance(args[0], typeclass):\n return dec(*args, **kwargs)\n\n @wraps(dec)\n def repl(f):\n return dec(f, *args, **kwargs)\n\n return repl\n\n return layer\n\n\n#############################################\n\n\ndef deduplicate(iterable):\n \"\"\" Removes duplicates in the passed iterable. \"\"\"\n return type(iterable)(\n [i for i in sorted(set(iterable), key=lambda x: iterable.index(x))])\n\n\ndef validate_name(name):\n if not isinstance(name, str) or len(name) == 0:\n return False\n if name in {'True', 'False', 'None'}:\n return False\n if re.match(r'^[a-zA-Z_][a-zA-Z_0-9]*$', name) is None:\n return False\n return True\n\n\ndef can_access(schedule: ScheduleType, storage: StorageType):\n \"\"\"\n Identifies whether a container of a storage type can be accessed in a specific schedule.\n \"\"\"\n if storage == StorageType.Register:\n return True\n\n if schedule in [\n ScheduleType.GPU_Device, ScheduleType.GPU_Persistent,\n ScheduleType.GPU_ThreadBlock, ScheduleType.GPU_ThreadBlock_Dynamic, ScheduleType.GPU_Default,\n ]:\n return storage in [\n StorageType.GPU_Global, StorageType.GPU_Shared,\n StorageType.CPU_Pinned\n ]\n elif schedule in [ScheduleType.Default, ScheduleType.CPU_Multicore]:\n return storage in [\n StorageType.Default, StorageType.CPU_Heap, StorageType.CPU_Pinned,\n StorageType.CPU_ThreadLocal\n ]\n elif schedule in [ScheduleType.FPGA_Device]:\n return storage in [\n StorageType.FPGA_Local, StorageType.FPGA_Global,\n StorageType.FPGA_Registers, StorageType.FPGA_ShiftRegister,\n StorageType.CPU_Pinned\n ]\n elif schedule == ScheduleType.Sequential:\n raise ValueError(\"Not well defined\")\n\n\ndef can_allocate(storage: StorageType, schedule: ScheduleType):\n \"\"\"\n Identifies whether a container of a storage type can be allocated in a\n specific schedule. 
Used to determine arguments to subgraphs by the\n innermost scope that a container can be allocated in. For example,\n FPGA_Global memory cannot be allocated from within the FPGA scope, or\n GPU shared memory cannot be allocated outside of device-level code.\n\n :param storage: The storage type of the data container to allocate.\n :param schedule: The scope schedule to query.\n :return: True if the container can be allocated, False otherwise.\n \"\"\"\n # Host-only allocation\n if storage in [\n StorageType.CPU_Heap, StorageType.CPU_Pinned,\n StorageType.CPU_ThreadLocal, StorageType.FPGA_Global,\n StorageType.GPU_Global\n ]:\n return schedule in [\n ScheduleType.CPU_Multicore, ScheduleType.Sequential,\n ScheduleType.MPI\n ]\n\n # FPGA-local memory\n if storage in [StorageType.FPGA_Local, StorageType.FPGA_Registers]:\n return schedule == ScheduleType.FPGA_Device\n\n # GPU-local memory\n if storage == StorageType.GPU_Shared:\n return schedule in [\n ScheduleType.GPU_Device,\n ScheduleType.GPU_ThreadBlock,\n ScheduleType.GPU_ThreadBlock_Dynamic,\n ScheduleType.GPU_Persistent,\n ScheduleType.GPU_Default\n ]\n\n # The rest (Registers) can be allocated everywhere\n return True\n\n\ndef is_array(obj: Any) -> bool:\n \"\"\"\n Returns True if an object implements the ``data_ptr()``,\n ``__array_interface__`` or ``__cuda_array_interface__`` standards\n (supported by NumPy, Numba, CuPy, PyTorch, etc.). If the interface is\n supported, pointers can be directly obtained using the\n ``_array_interface_ptr`` function.\n :param obj: The given object.\n :return: True iff the object implements the array interface.\n \"\"\"\n if (hasattr(obj, 'data_ptr') or hasattr(obj, '__array_interface__')\n or hasattr(obj, '__cuda_array_interface__')):\n return hasattr(obj, 'shape') and len(obj.shape) > 0\n return False\n",
"# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.\nimport dace\nimport numpy as np\n\nW = dace.symbol('W')\n\nnumber = 42\n\n\[email protected]\ndef f(A, number):\n @dace.map(_[0:W])\n def bla(i):\n inp << A[i]\n out >> number[i]\n out = 2 * inp\n\n\[email protected]\ndef prog(A, B):\n no = dace.define_local([number], dace.float32)\n number = dace.define_local([W], dace.float32)\n\n f(A, number)\n\n @dace.map(_[0:W])\n def bla2(i):\n inp << number[i]\n out >> B[i]\n out = 2 * inp\n\n\ndef test():\n W.set(3)\n\n A = dace.ndarray([W])\n B = dace.ndarray([W])\n\n A[:] = np.mgrid[0:W.get()]\n B[:] = dace.float32(0.0)\n\n prog(A, B, W=W)\n\n diff = np.linalg.norm(4 * A - B) / W.get()\n print(\"Difference:\", diff)\n assert diff <= 1e-5\n\n\nif __name__ == \"__main__\":\n test()\n"
] |
[
[
"numpy.allclose",
"numpy.random.rand",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
],
[
"numpy.linalg.norm",
"numpy.allclose"
],
[
"numpy.any"
],
[
"numpy.issubdtype",
"numpy.iinfo",
"numpy.dtype",
"numpy.finfo"
],
[
"numpy.linalg.norm"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
previtus/facenet
|
[
"9597e7955014f05f655c7617c1edbf41eaf1a3e3"
] |
[
"src/load_my_dataset.py"
] |
[
"#import cPickle as pickle\nimport pickle\nimport numpy as np\n\n#pickle.dump([embeddings], open('embeddings_from_XYZ.p', 'wb'))\n\n# ../\nload_path = \"ebmeddings_from_celeb_12k_noflips_160px.p\"\nload_path = \"embeddings_from_celeb_12kwithflips_160px.p\"\nload_path = \"embeddings_from_celeb_30k_noflip_160px.p\"\nwith open(load_path, \"rb\") as input_file:\n embeddings = pickle.load(input_file, encoding='latin1')\n\nprint(\"embeddings\",embeddings)\nprint(\"embeddings\",len(embeddings[0]))\n#print(\"embeddings.shape\" ,embeddings.shape)\n\n\nembeddings_n = np.asarray(embeddings)\nprint(\"embeddings_n\",embeddings_n)\nprint(\"embeddings_n.shape\" ,embeddings_n.shape)\n"
] |
[
[
"numpy.asarray"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
A4Vision/naive-decision-tree
|
[
"91ee33c91b3f8a596991959ac23c161b5552d9bf",
"91ee33c91b3f8a596991959ac23c161b5552d9bf"
] |
[
"tree/optimized_train/runtime_stats.py",
"tree/naive_train/train_tree.py"
] |
[
"from typing import Tuple\n\nimport pandas as pd\n\n\nclass RuntimeStats:\n def __init__(self, all_data_shape: Tuple[int, int]):\n self._x_shape = []\n self._data_size = []\n self._current_x_shape = None\n self._data_shape = all_data_shape\n\n def as_dataframe(self) -> pd.DataFrame:\n res = pd.DataFrame({'x_shape': self._x_shape,\n 'data_size': self._data_size})\n res['runtime'] = res['data_size'].map(lambda x: x[0] * x[1])\n return res\n\n def start_decision_rule_calculation(self, x_shape):\n self._current_x_shape = x_shape\n\n def record_iteration(self, data_size: Tuple[int, int]):\n self._x_shape.append(self._current_x_shape)\n self._data_size.append(data_size)\n",
"import copy\nfrom typing import Optional\n\nimport numpy as np\n\nfrom tree.descision_tree import DecisionTree, LeafNode, SimpleDecisionRule, combine_two_trees\nfrom tree.naive_train.optimal_cut import calc_score, find_cut\n\n\ndef select_decision_rule(x, y, params) -> Optional[SimpleDecisionRule]:\n scores_rules = []\n for feature_i in range(x.shape[1]):\n x_row = x.T[feature_i]\n argsort = np.argsort(x_row)\n y_sorted = y[argsort]\n x_row_sorted = x_row[argsort]\n i, score = find_cut(y_sorted, params['gamma'])\n no_split_score = calc_score(y_sorted, params['gamma'])\n if score >= no_split_score:\n # Better not to split at this point\n scores_rules.append((score, None))\n else:\n scores_rules.append((score, SimpleDecisionRule(x_row_sorted[i - 1], feature_i)))\n best_score, best_rule = min(scores_rules, key=lambda x: x[0])\n return best_rule\n\n\ndef _set_defaults(params):\n params.setdefault('max_depth', 2)\n params.setdefault('gamma', 0.001)\n\n\ndef train(x, y, params) -> DecisionTree:\n assert y.shape[0] > 0\n params_copy = copy.deepcopy(params)\n _set_defaults(params_copy)\n assert isinstance(params_copy['max_depth'], int) and params_copy['max_depth'] >= 0\n if params['max_depth'] == 0 or y.shape[0] == 1:\n return DecisionTree(LeafNode(np.average(y)))\n else:\n params_copy['max_depth'] -= 1\n decision_rule = select_decision_rule(x, y, params)\n\n if decision_rule is None:\n return DecisionTree(LeafNode(np.average(y)))\n b_right = decision_rule.decide_is_right_array(x)\n b_left = ~b_right\n tree_right = train(x[b_right], y[b_right], params)\n tree_left = train(x[b_left], y[b_left], params)\n return combine_two_trees(decision_rule, tree_left, tree_right)\n"
] |
[
[
"pandas.DataFrame"
],
[
"numpy.argsort",
"numpy.average"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
stnie/FrontDetection
|
[
"742ebf9619dcde40d42891073739945a05631ea3"
] |
[
"getRandomSamples.py"
] |
[
"import numpy as np\nimport torch\nimport os\n\nfrom scipy.ndimage import distance_transform_edt\n\nfrom Models.FDU3D import *\n\nfrom tqdm import tqdm\nimport argparse\n\nfrom era5dataset.FrontDataset import *\n# ERA Extractors\nfrom era5dataset.EraExtractors import *\n\nfrom IOModules.csbReader import *\n\nfrom NetInfoImport import *\n\nimport netCDF4\nfrom statsmodels.regression.quantile_regression import QuantReg\nimport statsmodels.api as sm\n\nfrom InferOutputs import setupDataLoader\n\n\ndef parseArguments():\n parser = argparse.ArgumentParser(description='FrontNet')\n parser.add_argument('--data', type = str, help = 'path to fronts either folder of preCalculated (unwidened!) network results or a file containing the preprocessed (widened!) results!')\n parser.add_argument('--mask', type = str, help = 'path to mask netCDF4 file for extreme events (a single File with containing all necessary timestamps!)')\n parser.add_argument('--heightMap', type=str, help = 'path to netCDF4 containing height map as geopotential')\n parser.add_argument(\"--season\", type = str, default = \"\", help = \"which season to evaluate for (djf, mam, jja, son), default no season\")\n parser.add_argument('--num_samples', type = int, default = -1, help='number of samples to infer from the dataset')\n parser.add_argument('--outname', help='name of the output')\n # for future use, currently default is fine\n parser.add_argument('--calcVar', type = str, default = \"precip\", help = 'which variable to measure along the cross section')\n args = parser.parse_args()\n #args.binary = args.classes == 1\n \n return args\n\ndef setupDataset(args):\n data_fold = args.data\n cropsize = (720, 1440)\n mapTypes = {\"all\": (\"\", (90,-89.75), (-180,180), (-0.25,0.25)) }\n\n myLevelRange = np.arange(105,138,4)\n\n myTransform = (None, None)\n labelThickness = 1\n\n myEraExtractor = BinaryResultExtractor() \n\n # Create Dataset\n data_set = WeatherFrontDataset(data_dir=data_fold, mapTypes = mapTypes, 
levelRange = myLevelRange, transform=myTransform, outSize=cropsize, labelThickness= labelThickness, era_extractor = myEraExtractor, has_subfolds = (False, False), asCoords = False, removePrefix = 0)\n return data_set\n\n\ndef performInference(loader, num_samples, parOpt, args):\n border = 20\n \n # number of iterations of dilation\n Boxsize = 10\n\n data_set = loader.dataset\n no = data_set.removePrefix\n mapType = \"all\"\n \n # get the day per month\n dpm = [0,31,29,31,30,31,30,31,31,30,31,30,31]\n # cumulative sum to get the number of hours since YYYY-12-01_00\n ssf = np.cumsum(np.array(dpm))*24\n seasons = [\"djf\", \"mam\", \"jja\", \"son\"]\n tgt_season = args.season\n\n tgtvar = \"\"\n if args.calcVar == \"precip\":\n front_file = args.data\n tgtvar = \"tp\"\n mask_file = args.mask\n # if it is a folder we will automatically look for an appropriate file\n if(os.path.isdir(args.mask)):\n mask_file = os.path.join(args.mask, \"tmp2016_eventMask_{}.nc\".format(tgt_season))\n height_file = args.heightMap\n else:\n print(\"Type not implemented. 
Abort!\")\n exit(1)\n # data is a folder => use the dataset to read all data\n # else read a single file (that is already processed!)\n singleFiles =os.path.isdir(front_file)\n \n skip = 0\n total_fronts = np.zeros((num_samples, 680, 1400, 5), dtype=np.bool)\n if(singleFiles):\n # load all files individually and combine them within this script\n for idx, data in enumerate(tqdm(loader, desc ='eval'), 0):\n if idx<skip:\n continue\n if(idx == num_samples+skip):\n break\n if(not torch.cuda.is_available()):\n inputs, labels, filename = data.data, data.labels, data.filenames\n else:\n inputs, labels, filename = data\n inputs = inputs.cpu()\n \n inputs = inputs[:,border:-border,border:-border,:]\n\n front = inputs.numpy()\n for ftype in range(5):\n front[:,:,ftype] = distance_transform_edt(1-front[:,:,ftype])<=Boxsize\n total_fronts[idx,:,:,:] = front[:,:,:].astype(np.bool)\n # Uncomment to Write the calculated fronts as a single file \n #total_fronts.tofile(os.path.join(args.mask, \"tmp2016_front4d_l2_v2.bin\"))\n #exit(1)\n else:\n # Already precalculated fronts in a single file. Load only once. 
Also No need for widening!\n total_fronts = np.fromfile(front_file, dtype=np.bool).reshape(-1,680,1400,5)\n\n if(tgt_season == \"djf\"):\n #the input file is ordered chronologically so d is the last entry and jf the first two months\n total_fronts = np.concatenate((total_fronts[:ssf[2]], total_fronts[ssf[11]:ssf[12]]), axis=0)\n elif(tgt_season == \"mam\"):\n total_fronts = total_fronts[ssf[2]:ssf[5]]\n elif(tgt_season == \"jja\"):\n total_fronts = total_fronts[ssf[5]:ssf[8]]\n elif(tgt_season == \"son\"):\n print(total_fronts.shape)\n total_fronts = total_fronts[ssf[8]:ssf[11]]\n print(total_fronts.shape)\n \n\n # Read the event mask\n rootgrp = netCDF4.Dataset(os.path.realpath(mask_file), \"r\", format=\"NETCDF4\", parallel=False)\n tgt_latrange, tgt_lonrage = data_set.getCropRange(data_set.mapTypes[mapType][1], data_set.mapTypes[mapType][2], data_set.mapTypes[mapType][3], 0)\n # the files have lat 90 - -90, lon 0 - 360\n # => we need to offset lonrange\n exEvs = np.zeros((rootgrp[\"time\"][:].shape[0], abs(int((tgt_latrange[0]-tgt_latrange[1])*4)), abs(int((tgt_lonrage[1]-tgt_lonrage[0])*4))), dtype =np.bool)\n print(exEvs.shape[0], num_samples)\n if(tgt_lonrage[0] < 0 and tgt_lonrage[1] >= 0):\n exEvs[:,:,:-int(tgt_lonrage[0])*4] = (rootgrp[tgtvar][:,int((90-tgt_latrange[0])*4):int((90-tgt_latrange[1])*4), int((tgt_lonrage[0])*4):]).astype(np.bool)\n exEvs[:,:,-int(tgt_lonrage[0])*4:] = (rootgrp[tgtvar][:,int((90-tgt_latrange[0])*4):int((90-tgt_latrange[1])*4), :int((tgt_lonrage[1])*4)]).astype(np.bool)\n else:\n exEvs = rootgrp[tgtvar][:,int((90-tgt_latrange[0])*4):int((90-tgt_latrange[1])*4), int((tgt_lonrage[0])*4):int((tgt_lonrage[1])*4)]\n rootgrp.close()\n\n # our output has a border => so add this to the mask file, too\n exEvs = exEvs[:,border:-border,border:-border] \n # create the aggregated events\n evcount = np.sum(exEvs, axis=0)\n\n\n\n # read the height map and remove all height +5 pixel radius\n rootgrp = 
netCDF4.Dataset(os.path.realpath(height_file), \"r\", format=\"NETCDF4\", parallel=False)\n tgt_latrange, tgt_lonrage = data_set.getCropRange(data_set.mapTypes[mapType][1], data_set.mapTypes[mapType][2], data_set.mapTypes[mapType][3], 0)\n # the files have lat 90 - -90, lon 0 - 360\n # => we need to offset lonrange\n heightmap = np.zeros((abs(int((tgt_latrange[0]-tgt_latrange[1])*4)), abs(int((tgt_lonrage[1]-tgt_lonrage[0])*4))))\n if(tgt_lonrage[0] < 0 and tgt_lonrage[1] >= 0):\n heightmap[:,:-int((tgt_lonrage[0])*4)] = (rootgrp[\"z\"][0,int((90-tgt_latrange[0])*4):int((90-tgt_latrange[1])*4), int((tgt_lonrage[0])*4):])\n heightmap[:,-int((tgt_lonrage[0])*4):] = (rootgrp[\"z\"][0,int((90-tgt_latrange[0])*4):int((90-tgt_latrange[1])*4), :int((tgt_lonrage[1])*4)])\n else:\n heightmap = rootgrp[\"z\"][0,int((90-tgt_latrange[0])*4):int((90-tgt_latrange[1])*4), int((tgt_lonrage[0])*4):int((tgt_lonrage[1])*4)]\n rootgrp.close()\n\n\n # transform geopotential to height in m\n heightmap /= 9.80665\n # get all points below 2000m (such that point >2000m are background for distance transform)\n heightmap = heightmap[border:-border,border:-border] < 2000\n # Get all points that are more than 5 points away from any background (point > 2000m)\n heightmap = distance_transform_edt(heightmap) > 5\n # save map for future reference\n heightmap.tofile(\"heightFilterMap.bin\")\n\n \n # initialize with set seed for reproducability\n rng = np.random.default_rng(12345)\n\n \n # when evaluating seasons, we should expect less extreme events per season\n casePerPoint = 13\n pointsPerList = 6\n # no specific season chosen => whole year. 
We now take ~4*13 ~ 50 cases per Point instead\n if(not (tgt_season in seasons)):\n casePerPoint = 50\n # fpop is the aggregate of the season\n fpop = np.sum(total_fronts, axis=0)/total_fronts.shape[0]\n # generate the event lists\n # get the total count of extreme events\n # get the ratio of front events \n \n k = 0\n # use 1% steps\n stepsize = 1\n # Only use points within [20 : 60] N/S as basepoints\n fpop = np.concatenate((fpop[100:261],fpop[420:581]),axis=0)\n heightmap = np.concatenate((heightmap[100:261], heightmap[420:581]), axis=0)\n validHeights = np.nonzero(heightmap)\n num_bpoints = 20\n for k in range(5):\n print(np.max(fpop[:,:,k]*100))\n # get the maximum frontal frequency (rounded)\n maxPct = np.round(np.max(fpop[:,:,k][validHeights]*100)).astype(int)\n print(maxPct, flush = True)\n # chose points to have at least 600 samples (basepoints * bins), minimum 10 basepoints per bin \n num_bpoints = max(10,(600//maxPct)+1)\n print(num_bpoints)\n maxPct+=1\n mySampleArray = np.zeros((maxPct, num_bpoints, 1000,5))\n myProbArray = np.zeros((maxPct,num_bpoints,1000))\n for p in range(0,maxPct,stepsize):\n print(p,end=\": \")\n low_p = (p-stepsize) / 100\n up_p = p/100\n # get all values in the midlats that are within the percentile range\n # the first bin contains only the 0% frequencies\n if(p == 0):\n allPoss = np.nonzero(fpop[:,:,k]<=up_p)\n # all other bins contain the ]k-stepsize : k] % frequencies\n else:\n allPoss = np.nonzero((fpop[:,:,k]<=up_p) * (fpop[:,:,k]>(low_p)))\n # filter, to obtain only points that are also lower than the height threshold\n validPoss = np.nonzero(heightmap[allPoss])\n # build a tuple for indexing containing all points within the current percentile and simultaneously being lower than the height threshold\n allPos = (allPoss[0][validPoss], allPoss[1][validPoss])\n\n if(len(allPos[0])<num_bpoints):\n print(\"invalid p:\", p, flush = True)\n mySampleArray[p,:,:,:] = np.NaN\n continue\n rand_points = rng.choice(len(allPos[0]), 
size=num_bpoints, replace = False)\n xsmpls = allPos[0][rand_points]\n ysmpls = allPos[1][rand_points]\n for point in range(num_bpoints):\n print(point, end=\", \")\n xsmp = xsmpls[point]\n ysmp = ysmpls[point]\n myProbArray[p,point,:] = fpop[xsmp,ysmp,k]\n # get the hemisphere of the current bp\n north = xsmp < 161\n # define the range of the opposite hemisphere from the event mask\n fr = 420 if north else 100\n to = 581 if north else 261\n # The offset in the total fronts array\n # as fpop has the tropics removed its southern hemisphere has an additional 159 pixel offset (420-261)\n xoff = 100 if north else 259\n # get all points on the opposite hemisphere, where enough cases of extreme precipitation were identified\n hemPos = np.nonzero(evcount[fr:to]>= casePerPoint)\n for li in range(1000):\n # draw random points from opposite hemisphere with enough precip events\n rps = rng.choice(len(hemPos[0]), size = pointsPerList, replace=False)\n # get the respective positions (xpos => add the offset to get the correct point in the 680x1400 mask)\n xposs = hemPos[0][rps]+fr\n yposs = hemPos[1][rps]\n for rp in range(pointsPerList):\n xpos = xposs[rp]\n ypos = yposs[rp]\n # get the count of events at this point\n t_size = evcount[xpos, ypos]\n # randomly select a start point, such that we can obtain case Per Point many samples\n t_begin = rng.integers(low = 0, high=1+t_size-casePerPoint, size = 1)\n # draw samples\n t_list = np.nonzero(exEvs[:,xpos, ypos])[0][t_begin[0]:t_begin[0]+casePerPoint]\n # for each timestamp read whether or not a front exists at the basepoint\n mySampleArray[p, point, li] += np.sum(total_fronts[t_list, xsmp+xoff,ysmp].astype(np.int16), axis = 0) \n # turn in into a ratio\n mySampleArray[p, point, li] /= casePerPoint*pointsPerList\n print(flush=True)\n\n \n with open(os.path.join(\"StatisticalTests\",args.outname,\"myRandSampResults_{}_{}.txt\".format(tgt_season, k)), \"w\") as f:\n # calulate the numpy median and percentiles (just as info)\n for p 
in range(0,maxPct,stepsize):\n print(p, file = f)\n print(myProbArray[p,:,0], file = f)\n print(np.median(mySampleArray[p], axis=1), file = f)\n print(np.percentile(mySampleArray[p],1, axis=1), file = f)\n print(np.percentile(mySampleArray[p],99, axis=1), file = f)\n # reshape the Probability array (of all frontal probabilites of bp) into a 1d array\n ps = myProbArray.reshape(-1)\n # to get the intercept calculated\n ps = sm.add_constant(ps)\n # for each type of front calculate the 1 and 99 percentile using Quantile Regression\n for x in range(5):\n print(\"for x = \",x)\n # turn the mixed event in a 1d array as well\n exop = mySampleArray[::stepsize,:,:,x].reshape(-1)\n model1 = QuantReg(endog= exop, exog = ps, missing = 'drop')\n result1 = model1.fit(q=0.01, vcov = 'robust', kernel = 'epa', bandwidth = 'hsheather', max_iter = 1000, p_tol=1e-06)\n result2 = model1.fit(q=0.99, vcov = 'robust', kernel = 'epa', bandwidth = 'hsheather', max_iter = 1000, p_tol=1e-06)\n medi = model1.fit(q=0.5, vcov = 'robust', kernel = 'epa', bandwidth = 'hsheather', max_iter = 1000, p_tol=1e-06)\n print(\"0.01pct\", result1.params, file = f)\n print(\"0.50pct\", medi.params, file = f)\n print(\"0.99pct\", result2.params, file = f)\n\n \n\nif __name__ == \"__main__\":\n \n args = parseArguments()\n parOpt = None#setupDevice(args)\n\n name = os.path.join(\"StatisticalTests\",args.outname)\n if(not os.path.isdir(name)):\n os.mkdir(name)\n tmpDataLoc = args.data\n if(not os.path.isdir(args.data)):\n args.data = os.path.dirname(args.data)\n data_set = setupDataset(args) \n # 0 worker, to ignore problems caused by the multiprocessing\n loader = setupDataLoader(data_set, 0)\n # reset the correct data path\n args.data=tmpDataLoc\n \n num_samples = len(loader)\n if(args.num_samples != -1):\n num_samples = args.num_samples\n print(\"Evaluating {} Data files\".format(num_samples))\n with torch.no_grad():\n performInference(loader, num_samples, parOpt, args)\n \n"
] |
[
[
"numpy.fromfile",
"numpy.nonzero",
"numpy.arange",
"numpy.median",
"scipy.ndimage.distance_transform_edt",
"numpy.percentile",
"numpy.concatenate",
"numpy.max",
"torch.no_grad",
"torch.cuda.is_available",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.random.default_rng"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.7",
"1.0",
"0.10",
"1.2",
"0.14",
"0.19",
"1.5",
"0.12",
"0.17",
"0.13",
"1.6",
"1.4",
"1.9",
"1.3",
"1.10",
"0.15",
"0.18",
"0.16",
"1.8"
],
"tensorflow": []
}
] |
LemonJust/psd95_segmentation
|
[
"5514c19d328b9b9043931e21017d4a28a717def7",
"5514c19d328b9b9043931e21017d4a28a717def7"
] |
[
"src/features/build_features.py",
"src/models/cnn_train_data.py"
] |
[
"from pathlib import Path\nimport numpy as np\nimport tifffile as tif\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pickle\nimport seaborn as sns\nimport requests\n\n# import local package\nfrom src.data.read_data import read_channel\n\n\ndef get_percentiles(data, step=10):\n \"\"\"\n The function to calculate 11 percentiles of data distribution.\n\n Calculates the following percentiles :\n (0 = min, 10 , 20 , 30, 40, 50, 60, 70, 80, 90, 100=max ) for step = 10.\n\n Parameters:\n data (numpy array): distribution to get percentiles from.\n\n Returns:\n numpy array: percentiles\n \"\"\"\n prc_list = np.arange(0, 100 + step, step)\n return np.percentile(data, prc_list)\n\n\ndef get_img_percentiles(channel, files, padding):\n \"\"\"\n The function to calculate 11 percentiles of the intensity distribution for a list of images.\n\n Calculates the following percentiles for each image provided :\n (0 = min, 10 , 20 , 30, 40, 50, 60, 70, 80, 90, 100=max ).\n\n Parameters:\n channel (string): which channel to get. 
Either \"red\" or \"green\".\n files (array): images to get percentiles for.\n padding (3x3 array of int): how many pixels to crop away on each side, in the format\n [[z_top, z_bottom],[y_top,y_bottom],[x_left,x_right]].\n Returns:\n numpy array : 11 percentiles for each file ( one file in one row)\n \"\"\"\n num_prc = 11\n prc_img = np.zeros((len(files), num_prc))\n for count, file_name in enumerate(files):\n print(count)\n my_img = read_channel(channel, file_name.as_posix(), padding)\n prc_img[count, :] = get_percentiles(my_img)\n return prc_img\n\n\ndef snr(signal, bg):\n \"\"\"\n returns snr as (signal - bg)/bg\n \"\"\"\n return (signal - bg) / bg\n\n\ndef subtract_bg(signal, bg):\n \"\"\"\n returns normalised intensity as (signal - bg)\n \"\"\"\n return signal - bg\n\n\ndef get_bg_stats_as_df(prc, bg_prc, study_id, int_df=None, snr_df=None, snr_hist_df=None):\n \"\"\"\n Specifically for the dictionary data\n prc = syn[study_id]['lost']['prc']\n bg_prc : which prc to use as bg: 0 = min, 1 = 5% , 2 = 10%, 3 = 15%, 4 = 20%\n\n \"\"\"\n all_snr = []\n\n if int_df is None:\n int_df = pd.DataFrame()\n if int_df is None:\n snr_df = pd.DataFrame()\n if snr_hist_df is None:\n snr_hist_df = pd.DataFrame()\n\n intensity_raw = prc[:, -1] # last element = max\n bg = prc[:, bg_prc]\n\n intensity_bg_sub = subtract_bg(intensity_raw, bg)\n the_snr = snr(intensity_raw, bg)\n\n # take percentiles of that distribution\n int_prc = get_percentiles(intensity_bg_sub)\n snr_prc = get_percentiles(the_snr)\n\n # add to df\n int_df[f\"{study_id}\"] = int_prc\n snr_df[f\"{study_id}\"] = snr_prc\n\n new_hist_df = pd.DataFrame({f\"{study_id}\": the_snr})\n snr_hist_df = pd.concat([snr_hist_df, new_hist_df], axis=1)\n\n return int_df, snr_df, snr_hist_df, the_snr\n\n\ndef get_cdf(prc, bg_prc): # , study_id, cdf_df=None):\n \"\"\"\n Specifically for the dictionary data\n prc = syn[study_id]['lost']['prc']\n bg_prc : which prc to use as bg: 0 = min, 1 = 5% , 2 = 10%, 3 = 15%, 4 = 20%\n\n 
\"\"\"\n int_cdf = np.zeros((51, 1))\n\n # if cdf_df is None:\n # cdf_df = pd.DataFrame()\n\n intensity_raw = prc[:, -1] # last element = max\n bg = prc[:, bg_prc]\n\n intensity_bg_sub = subtract_bg(intensity_raw, bg)\n\n for i_val, int_val in enumerate(range(0, 510, 10)):\n # get percentage of values below int_val\n int_cdf[i_val, 0] = np.mean(intensity_bg_sub < int_val)\n\n # add to df\n # cdf_df[f\"{study_id}\"] = int_cdf\n\n return int_cdf\n\n\ndef get_reduced_cdf(prc, bg_prc, reduce_by): # , study_id, cdf_df=None):\n \"\"\"\n Specifically for the dictionary data\n prc = syn[study_id]['lost']['prc']\n bg_prc : which prc to use as bg: 0 = min, 1 = 5% , 2 = 10%, 3 = 15%, 4 = 20%\n reduce_by : percentage to reduce the intensity\n \"\"\"\n int_cdf = np.zeros((51, 1))\n\n intensity_raw = prc[:, -1] # last element = max\n bg = prc[:, bg_prc]\n\n intensity_bg_sub = subtract_bg(intensity_raw, bg)\n\n snr_thr = min(snr(intensity_raw, bg))\n red_snr = (intensity_bg_sub * (1 - reduce_by / 100)) / bg\n\n # keep only the synapses that got \"lost\" due to reduction\n is_lost = red_snr < snr_thr\n intensity_lost = intensity_bg_sub[is_lost]\n\n # fraction lost\n frc_lost = intensity_lost.shape[0] / intensity_bg_sub.shape[0]\n\n for i_val, int_val in enumerate(range(0, 510, 10)):\n # get percentage of values below int_val\n if intensity_lost.size == 0:\n int_cdf[i_val, 0] = 0\n else:\n int_cdf[i_val, 0] = np.mean(intensity_lost < int_val)\n\n return int_cdf, frc_lost\n",
"import pandas as pd\nfrom pathlib import Path\nimport numpy as np\nimport tifffile as tif\n\nfrom src.features.synapse_features import get_centroids_and_labels, get_one_volume\n\n\nclass CnnTrainingData:\n \"\"\"\n This class performs data consolidation, preprocess and etc. for CNN input\n \"\"\"\n\n def __str__(self):\n return \"training data\"\n\n def __init__(self, data_folder=None, info_csv=None):\n \"\"\"\n Initialises CnnTrainingData object.\n\n Parameters:\n data_folder (string) : path to the data folder,\n Ex. : \"D:/Code/repos/psd95_segmentation/data\"\n info_csv (string) : path to the csv that has img - rid pairs\n image names under 'Source Image' , rid names under 'Syn RID 1'\n \"\"\"\n\n self.data_folder = Path(data_folder)\n self.info_df = pd.read_csv(info_csv)\n\n self.rid = []\n self.img = {}\n\n self.path_rid = {}\n self.path_img = {}\n self.path_nuc = {}\n self.path_npz = {}\n\n self.shape = []\n self.volumes = {}\n self.labels = {}\n\n def append_rids(self, *argv):\n \"\"\"\n Adds rids to the list of data.\n Generate all useful info: img, paths to files TODO : finish this\n \"\"\"\n\n for rid in argv:\n self.rid.append(rid)\n\n img = self.get_img(rid)\n self.img[rid] = img\n\n self.path_rid[rid] = self.generate_path(rid, 'rid')\n self.path_img[rid] = self.generate_path(rid, 'img')\n self.path_nuc[rid] = self.generate_path(rid, 'nuc')\n self.path_npz[rid] = self.generate_path(rid, 'npz')\n\n def get_img(self, rid):\n \"\"\"\n Sets up everything for the chosen rid:\n get's paths to segmentation files, nuclear files, images\n \"\"\"\n img = self.info_df['Source Image'].values[self.info_df['Syn RID 1'] == '1-1E98'][0]\n return img\n\n def generate_path(self, rid, path_type):\n \"\"\"\n Generates path according to the HARDCODED template\n\n Parameters:\n rid (string) : the RID you want to create synapse csv, npz,\n nuclear of image path.\n path_type (string) : what kind of path : 'rid' / 'npz' / 'nuc' / 'img' .\n\n Returns:\n string : path you 
requested\n \"\"\"\n # template paths\n # FIXME : this won't work if the folder structure/naming convention changes\n rid_path = Path('raw/csv/pallium/syn')\n img_path = Path('raw/img/pallium')\n nuc_path = Path('raw/csv/pallium/nuc')\n npz_path = Path('raw/npz/pallium/syn')\n\n if path_type == 'rid':\n file_name = self.img[rid] + \"_\" + rid + \"_synapses_only.csv\"\n file_path = self.data_folder / rid_path / file_name\n elif path_type == 'img':\n file_name = \"Image_\" + self.img[rid] + \".ome.tif\"\n file_path = self.data_folder / img_path / file_name\n elif path_type == 'nuc':\n file_name = self.img[rid] + \"_nuclei_only.csv\"\n file_path = self.data_folder / nuc_path / file_name\n elif path_type == 'npz':\n file_name = self.img[rid] + \"_\" + rid + \".npz\"\n file_path = self.data_folder / npz_path / file_name\n else:\n ValueError('who should be \"rid\" \"img\" or \"nuc\" ')\n\n return file_path\n\n def get_volumes_and_labels(self, padding, recrop=False):\n \"\"\"\n Crops volumed according to padding around the center.\n\n Parameters:\n padding (list) : half radius of area around the center of synapse to consider.\n In ZYX order, so that [0,7,7] will result in input_shape=(15,15,1)\n \"\"\"\n shape = 2 * np.array(padding) + 1\n\n # either recrop is True of self.volumes is empty:\n # crop volumes for all the entries (empty dictionaries are false)\n if recrop or (not self.volumes):\n self.shape = shape\n for rid in self.rid:\n self.volumes[rid], self.labels[rid] = self.crop_and_label(rid)\n\n # crop volumes only for the new entries\n else:\n assert self.shape == shape, \"Padding has changed, do you want to recrop all the volumes?\"\n\n for rid in self.rid:\n if rid in self.volumes:\n pass\n else:\n self.volumes[rid], self.labels[rid] = self.crop_and_label(rid)\n\n def crop_and_label(self, rid):\n \"\"\"\n For all centroids crop the box around\n \"\"\"\n centroids_tiff, labels = get_centroids_and_labels(self.path_rid[rid], self.path_npz[rid])\n num_centroids = 
centroids_tiff.shape[0]\n\n img_volume = tif.imread(self.path_img[rid])\n\n cropped_volumes = np.zeros((num_centroids, self.shape[0], self.shape[1], self.shape[2]))\n\n for count, centroid in enumerate(centroids_tiff):\n cropped_volumes[count, :, :, :] = get_one_volume(img_volume, centroid, self.shape)\n\n return cropped_volumes, labels\n\n\n"
] |
[
[
"pandas.concat",
"numpy.arange",
"numpy.percentile",
"pandas.DataFrame",
"numpy.mean",
"numpy.zeros"
],
[
"numpy.array",
"pandas.read_csv",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
marcinjurek/pyMRA
|
[
"896964175e9d373aedfd0b91e074df1932bbabfc"
] |
[
"pyMRA/tests/test-param-est.py"
] |
[
"import pdb\nimport scipy.optimize as opt\nimport logging\nimport numpy as np\nimport sys\nimport scipy.linalg as lng\nfrom scipy.stats import multivariate_normal as mvn\n\nsys.path.append('../..')\n\nfrom pyMRA.MRATree import MRATree\nimport pyMRA.MRATools as mt\n\n\n\nlogging.basicConfig(format='%(asctime)s %(message)s', datefmt='%H:%M:%S',level=logging.INFO)\n\nnp.random.seed(10)\n\n###### set simulation parameters #####\nfrac_obs = 0.4 # the fraction of locations for which observations are available\ndim_x = 30; dim_y = 1\n\nsig = 1.0\nme_scale=0.2\nkappa = 0.05\n\n\n### MRA parameters\nM=2; J=4; r0=2\ncritDepth = M+1\n\n\n\n#filename = '/home/marcin/MRF/data/Exp_Theta0.1_X100_Y100.csv'\n#filename = '/home/marcin/MRF/data/Exp_Theta0.1_X100_Y100_missing_all.csv'\n#filename = '/home/marcin/MRF/data/sat_temps.csv'\nfilename = '/home/marcin/pyMRA/pyMRA/data/small/Exp_Theta0.1_X10_Y10.csv'\nlogging.basicConfig(format='%(asctime)s %(message)s', datefmt='%H:%M:%S',level=logging.INFO)\n \n\n \nwith open(filename, 'r') as ipFile:\n csv = [line.strip().split(',') for line in ipFile.readlines()][1:]\n \nN = len(csv)\n\nlocs=np.zeros((N, 2))\ny_obs = np.zeros((N, 1))\ny = np.zeros((N, 1))\n\n \nfor idx, line in enumerate(csv):\n\n locs[idx, 1] = float(line[0])\n locs[idx, 0] = float(line[1])\n y_obs[idx, 0] = float(line[2]) if line[2]!='NA' else np.NAN\n if len(line)>3:\n y[idx, 0] = float(line[3])\n\nlocs[:,0] = locs[:,0] - np.min(locs[:,0])\nlocs[:,1] = locs[:,1] - np.min(locs[:,1])\n \nlocs[:,0] = locs[:,0]/np.max(locs[:,0])\nlocs[:,1] = locs[:,1]/np.max(locs[:,1])\n\nNx = len(np.unique(locs[:,0])); Ny = len(np.unique(locs[:,1]))\n \nobs_mean = np.nanmean(y_obs)\ny_obs = y_obs - obs_mean\ny = y - obs_mean\n \nobs_inds = np.isfinite(y_obs).flatten()\nR = me_scale\ny_disp = y_obs.reshape((Nx, Ny))\n\n\n\n##### parameter optimization #####\n\ndef MRALikelihood(params):\n\n kappa, sigma, me_scale = params\n cov = lambda _locs1, _locs2: sigma*mt.Matern32(_locs1, _locs2, 
l=np.abs(kappa))\n mraTree = MRATree(locs, M, J, r0, critDepth, cov, y_obs, me_scale) \n lik = mraTree.getLikelihood()\n return( lik )\n\n\ndef TrueLikelihood(kappa):\n\n cov = lambda _locs1: mt.ExpCovFun(_locs1, _locs1, l=np.abs(kappa))\n obs = y_obs[obs_inds].ravel()\n obs_mat = np.matrix(obs).T\n obs_locs = locs[obs_inds,:]\n varY = cov(obs_locs) + np.eye(len(obs))*me_scale\n\n \n sign, logdet = np.linalg.slogdet(varY)\n const = len(obs)*np.log(2*np.pi)\n quad_form = obs_mat.T*lng.inv(varY)*obs_mat\n\n hand_lik = logdet + quad_form\n \n full_hand_lik = -0.5*( const + hand_lik )\n\n \n model = mvn(mean=np.zeros(len(obs)), cov=varY)\n numpy_lik = model.logpdf(obs)\n\n #print(\"full_hand_lik: %f\" % full_hand_lik)\n #print(\"numpy_lik: %f\" % numpy_lik)\n \n #return(numpy_lik)\n return( logdet + quad_form )\n\n\n\n#logging.info(\"true likelihood: %f\" % TrueLikelihood(kappa))\n#logging.info(\"MRA likelihood: %f\" % MRALikelihood(kappa))\n\n\nxMRA = opt.minimize(MRALikelihood, [0.1, 0.8, 0.6], method='nelder-mead', options={'xtol':1e-3, 'disp':False})\n#xTrue = opt.minimize(TrueLikelihood, [kappa], method='nelder-mead', options={'xtol':1e-1, 'disp':False})\n\nlogging.info(\"kappa: %f,\\n sigma: %f,\\n me_scale: %f\" % tuple(xMRA.x))\n#logging.info(\"True estimate: %f\" % xTrue.x[0])\n\n \n"
] |
[
[
"numpy.matrix",
"numpy.log",
"numpy.abs",
"numpy.random.seed",
"numpy.min",
"numpy.unique",
"numpy.linalg.slogdet",
"numpy.isfinite",
"numpy.max",
"scipy.optimize.minimize",
"numpy.nanmean",
"scipy.linalg.inv",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.12",
"0.14",
"0.15"
],
"tensorflow": []
}
] |
KanHatakeyama/ion_predictor
|
[
"cc3fd4366a70726c7479685485fe02b74b526647"
] |
[
"ion_manager/ion_predictor/ml/pretrain_descriptors.py"
] |
[
"\nimport pandas as pd\nimport numpy as np\nimport joblib\nimport random\nfrom ..chem_utils.Autodescriptor import MordredDescriptor\nfrom tqdm import tqdm\nfrom sklearn.preprocessing import StandardScaler\nimport yaml\n\nrandom.seed(0)\n\n\ndef dump(\n settings\n):\n \"\"\"\n automatically calculate descriptors for training\n from smiles data (path) to descriptor data (descriptor path)\n\n\n \"\"\"\n\n path = settings[\"pretrain_smiles_path\"]\n num_learning_molecules = settings[\"num_learning_molecules\"]\n descriptor_path = settings[\"descriptor_path\"]\n smiles_path = settings[\"smiles_path\"]\n\n # read smiles list\n smiles_list = list(pd.read_csv(path)[\"SMILES\"].values)\n\n if num_learning_molecules == -1:\n num_learning_molecules = len(smiles_list)\n\n # select random ones\n smiles_list = random.sample(smiles_list, num_learning_molecules)\n\n #########\n # calc descriptors\n\n # init descriptor module\n desc_calcualtor = MordredDescriptor()\n desc_calcualtor.dict_mode = False\n\n # calcilaion. it takes time\n descriptor_list = [desc_calcualtor.calc(sm) for sm in tqdm(smiles_list)]\n\n # scaling\n scaler = StandardScaler()\n descriptor_array = scaler.fit_transform(np.array(descriptor_list))\n\n # save\n joblib.dump(descriptor_array, descriptor_path, compress=9)\n joblib.dump(smiles_list, smiles_path, compress=9)\n"
] |
[
[
"sklearn.preprocessing.StandardScaler",
"numpy.array",
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
numb3r3/runx
|
[
"7745dd3a2994a444352e12ba4afaec5ed112b743"
] |
[
"runx/logx.py"
] |
[
"\"\"\"\nCopyright 2020 Nvidia Corporation\n\nRedistribution and use in source and binary forms, with or without\nmodification, are permitted provided that the following conditions are met:\n\n1. Redistributions of source code must retain the above copyright notice, this\n list of conditions and the following disclaimer.\n\n2. Redistributions in binary form must reproduce the above copyright notice,\n this list of conditions and the following disclaimer in the documentation\n and/or other materials provided with the distribution.\n\n3. Neither the name of the copyright holder nor the names of its contributors\n may be used to endorse or promote products derived from this software\n without specific prior written permission.\n\nTHIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\nAND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\nIMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\nARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\nLIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\nCONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\nSUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\nINTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\nCONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\nARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\nPOSSIBILITY OF SUCH DAMAGE.\n\"\"\"\nfrom collections import defaultdict\nfrom contextlib import contextmanager\nfrom shutil import copyfile\n\nimport csv\nimport os\nimport re\nimport shlex\nimport subprocess\nimport time\n\n\ntry:\n from torch.utils.tensorboard import SummaryWriter\nexcept ModuleNotFoundError:\n from tensorboardX import SummaryWriter\n\nimport torch\n\ntry:\n from .utils import (get_logroot, save_hparams, trn_names, val_names,\n ConditionalProxy)\nexcept ImportError:\n # This is to allow the unit 
tests to run properly\n from utils import (get_logroot, save_hparams, trn_names, val_names,\n ConditionalProxy)\n\n\ndef is_list(x):\n return isinstance(x, (list, tuple))\n\n\ndef get_gpu_utilization_pct():\n '''\n Use nvidia-smi to capture the GPU utilization, which is reported as an\n integer in range 0-100.\n '''\n util = subprocess.check_output(\n shlex.split('nvidia-smi --query-gpu=\"utilization.gpu\" '\n '--format=csv,noheader,nounits -i 0'))\n util = util.decode('utf-8')\n util = util.replace('\\n', '')\n return int(util)\n\n\nclass LogX(object):\n def __init__(self, rank=0):\n self.initialized = False\n\n def initialize(self, logdir=None, coolname=False, hparams=None,\n tensorboard=False, no_timestamp=False, global_rank=0,\n eager_flush=True):\n '''\n Initialize logx\n\n inputs\n - logdir - where to write logfiles\n - tensorboard - whether to write to tensorboard file\n - global_rank - must set this if using distributed training, so we only\n log from rank 0\n - coolname - generate a unique directory name underneath logdir, else\n use logdir as output directory\n - hparams - only use if not launching jobs with runx, which also saves\n the hparams.\n - eager_flush - call `flush` after every tensorboard write\n '''\n self.rank0 = (global_rank == 0)\n self.initialized = True\n\n if logdir is not None:\n self.logdir = logdir\n else:\n logroot = get_logroot()\n if coolname:\n from coolname import generate_slug\n self.logdir = os.path.join(logroot, generate_slug(2))\n else:\n self.logdir = os.path.join(logroot, 'default')\n\n if self.rank0:\n # confirm target log directory exists\n if not os.path.isdir(self.logdir):\n os.makedirs(self.logdir, exist_ok=True)\n\n # save hparams\n if hparams is not None:\n save_hparams(hparams, self.logdir)\n\n # Tensorboard file\n if tensorboard:\n self.tb_writer = SummaryWriter(log_dir=self.logdir,\n flush_secs=1)\n else:\n self.tb_writer = None\n\n self.eager_flush = eager_flush\n\n # This allows us to use the tensorboard with 
automatic checking of both\n # the `tensorboard` condition, as well as ensuring writes only happen\n # on rank0. Any function supported by `SummaryWriter` is supported by\n # `ConditionalProxy`. Additionally, flush will be called after any call\n # to this.\n self.tensorboard = ConditionalProxy(\n self.tb_writer,\n tensorboard and self.rank0,\n post_hook=self._flush_tensorboard,\n )\n\n if not self.rank0:\n return\n\n # Metrics file\n metrics_fn = os.path.join(self.logdir, 'metrics.csv')\n self.metrics_fp = open(metrics_fn, mode='a+')\n self.metrics_writer = csv.writer(self.metrics_fp, delimiter=',')\n\n # Log file\n log_fn = os.path.join(self.logdir, 'logging.log')\n self.log_file = open(log_fn, mode='a+')\n\n # save metric\n self.save_metric = None\n self.best_metric = None\n self.save_ckpt_fn = ''\n # Find the existing best checkpoint, and update `best_metric`,\n # if available\n self.best_ckpt_fn = self.get_best_checkpoint() or ''\n if self.best_ckpt_fn:\n best_chk = torch.load(self.best_ckpt_fn, map_location='cpu')\n self.best_metric = best_chk.get('__metric', None)\n self.epoch = defaultdict(lambda: 0)\n self.no_timestamp = no_timestamp\n\n # Initial timestamp, so that epoch time calculation is correct\n phase = 'start'\n csv_line = [phase]\n\n # add epoch/iter\n csv_line.append('{}/step'.format(phase))\n csv_line.append(0)\n\n # add timestamp\n if not self.no_timestamp:\n # this feature is useful for testing\n csv_line.append('timestamp')\n csv_line.append(time.time())\n\n self.metrics_writer.writerow(csv_line)\n self.metrics_fp.flush()\n\n def __del__(self):\n if self.initialized and self.rank0:\n self.metrics_fp.close()\n self.log_file.close()\n\n def msg(self, msg):\n '''\n Print out message to std and to a logfile\n '''\n if not self.rank0:\n return\n\n print(msg)\n \n if hasattr(self, \"log_file\"):\n self.log_file.write(msg + '\\n')\n self.log_file.flush()\n\n def add_image(self, path, img, step=None):\n '''\n Write an image to the tensorboard file\n 
'''\n self.tensorboard.add_image(path, img, step)\n\n def add_scalar(self, name, val, idx):\n '''\n Write a scalar to the tensorboard file\n '''\n self.tensorboard.add_scalar(name, val, idx)\n\n def _flush_tensorboard(self):\n if self.eager_flush and self.tb_writer is not None:\n self.tb_writer.flush()\n\n @contextmanager\n def suspend_flush(self, flush_at_end=True):\n prev_flush = self.eager_flush\n self.eager_flush = False\n yield\n self.eager_flush = prev_flush\n if flush_at_end:\n self._flush_tensorboard()\n\n def metric(self, phase, metrics, epoch=None):\n \"\"\"Record train/val metrics. This serves the dual-purpose to write these\n metrics to both a tensorboard file and a csv file, for each parsing by\n sumx.\n\n Arguments:\n phase: 'train' or 'val'. sumx will only summarize val metrics.\n metrics: dictionary of metrics to record\n global_step: (optional) epoch or iteration number\n \"\"\"\n if (not self.rank0) or (not hasattr(self, \"metrics_writer\")):\n return\n\n # define canonical phase\n if phase in trn_names:\n canonical_phase = 'train'\n elif phase in val_names:\n canonical_phase = 'val'\n else:\n raise('expected phase to be one of {} {}'.format(str(val_names,\n trn_names)))\n\n if epoch is not None:\n self.epoch[canonical_phase] = epoch\n\n # Record metrics to csv file\n csv_line = [canonical_phase]\n for k, v in metrics.items():\n csv_line.append(k)\n csv_line.append(v)\n\n # add epoch/iter\n csv_line.append('epoch')\n csv_line.append(self.epoch[canonical_phase])\n\n # add timestamp\n if not self.no_timestamp:\n # this feature is useful for testing\n csv_line.append('timestamp')\n csv_line.append(time.time())\n\n # To save a bit of disk space, only save validation metrics\n if canonical_phase == 'val':\n self.metrics_writer.writerow(csv_line)\n self.metrics_fp.flush()\n\n # Write updates to tensorboard file\n with self.suspend_flush():\n for k, v in metrics.items():\n self.add_scalar('{}/{}'.format(phase, k), v,\n self.epoch[canonical_phase])\n\n # 
if no step, then keep track of it automatically\n if epoch is None:\n self.epoch[canonical_phase] += 1\n\n @staticmethod\n def is_better(save_metric, best_metric, higher_better):\n return best_metric is None or \\\n higher_better and (save_metric > best_metric) or \\\n not higher_better and (save_metric < best_metric)\n\n def save_model(self, save_dict, metric, epoch, higher_better=True,\n delete_old=True):\n \"\"\"Saves a model to disk. Keeps a separate copy of latest and best models.\n\n Arguments:\n save_dict: dictionary to save to checkpoint\n epoch: epoch number, used to name checkpoint\n metric: metric value to be used to evaluate whether this is the\n best result\n higher_better: True if higher valued metric is better, False\n otherwise\n delete_old: Delete prior 'lastest' checkpoints. By setting to\n false, you'll get a checkpoint saved every time this\n function is called.\n \"\"\"\n if not self.rank0:\n return\n\n save_dict['__metric'] = metric\n\n if os.path.exists(self.save_ckpt_fn) and delete_old:\n os.remove(self.save_ckpt_fn)\n # Save out current model\n self.save_ckpt_fn = os.path.join(\n self.logdir, 'last_checkpoint_ep{}.pth'.format(epoch))\n torch.save(save_dict, self.save_ckpt_fn)\n self.save_metric = metric\n\n is_better = self.is_better(self.save_metric, self.best_metric,\n higher_better)\n if is_better:\n if os.path.exists(self.best_ckpt_fn):\n os.remove(self.best_ckpt_fn)\n self.best_ckpt_fn = os.path.join(\n self.logdir, 'best_checkpoint_ep{}.pth'.format(epoch))\n self.best_metric = self.save_metric\n copyfile(self.save_ckpt_fn, self.best_ckpt_fn)\n return is_better\n\n def get_best_checkpoint(self):\n \"\"\"\n Finds the checkpoint in `self.logdir` that is considered best.\n\n If, for some reason, there are multiple best checkpoint files, then\n the one with the highest epoch will be preferred.\n\n Returns:\n None - If there is no best checkpoint file\n path (str) - The full path to the best checkpoint otherwise.\n \"\"\"\n match_str = 
r'^best_checkpoint_ep([0-9]+).pth$'\n best_epoch = -1\n best_checkpoint = None\n for filename in os.listdir(self.logdir):\n match = re.fullmatch(match_str, filename)\n if match is not None:\n # Extract the epoch number\n epoch = int(match.group(1))\n if epoch > best_epoch:\n best_epoch = epoch\n best_checkpoint = filename\n\n if best_checkpoint is None:\n return None\n return os.path.join(self.logdir, best_checkpoint)\n\n def load_model(self, path):\n \"\"\"Restore a model and return a dict with any meta data included in\n the snapshot\n \"\"\"\n checkpoint = torch.load(path)\n state_dict = checkpoint['state_dict']\n meta = {k: v for k, v in checkpoint.items() if k != 'state_dict'}\n return state_dict, meta\n\n\n# Importing logx gives you access to this shared object\nlogx = LogX()\n"
] |
[
[
"torch.load",
"torch.save"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
NathanRiviere/Recipe-Recommender
|
[
"f0a7266b07899cbeeaa001e889305822d9c13a8d"
] |
[
"src/train.py"
] |
[
"import argparse\nfrom datetime import datetime\nimport numpy as np\nimport os\nimport os.path\nfrom sklearn.model_selection import train_test_split\nimport torch\nfrom torch.utils.data import Dataset, DataLoader\n\nfrom helpers import *\nfrom ctrmf import CTRMF\n\n\n# Parse input arguments\nap = argparse.ArgumentParser()\nap.add_argument(\"-d\", \"--dataset\", required=True,\n\t\t\t\thelp=\"path to input dataset\")\nap.add_argument(\"-e\", \"--epochs\", type=int, default=25,\n\t\t\t\thelp=\"# of epochs to train our network for\")\nap.add_argument(\"-p\", \"--plot\", type=str, default=\"plot.png\",\n\t\t\t\thelp=\"path to output loss/accuracy plot\")\nap.add_argument(\"-s\", \"--save\", action=\"store_true\", \\\n help=\"add flag if you want to save the model after training\")\n\nargs = vars(ap.parse_args())\n\n# Set batch size\nbatch_size = 5000\n\n# Use GPU (cuda) if available\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n\n# Create RatingDataset class to help with loading data\nclass RatingDataset(Dataset):\n\tdef __init__(self, user_ratings):\n\t\tself.user_ratings = user_ratings\n\t\tself.row_indices, self.col_indices = user_ratings.nonzero()\n\n\tdef __len__(self):\n\t\treturn len(self.row_indices)\n\n\tdef __getitem__(self, idx):\n\t\trow = self.row_indices[idx]\n\t\tcol = self.col_indices[idx]\n\t\trating = self.user_ratings[row, col]\n\t\treturn torch.LongTensor([row]).to(device), \\\n\t\t\t\ttorch.LongTensor([col]).to(device), \\\n\t\t\t\ttorch.FloatTensor([rating]).to(device)\n\n# Load the user-ratings and recipe-feature matrices\nrecipe_feature = get_recipe_feature_map(os.path.join(args['dataset'], 'generated-results', 'Recipe-feature_map.npy')).T\nuser_rating = get_recipe_feature_map(os.path.join(args['dataset'], 'generated-results', 'user-rating_matrix.npy'))\n\n# Split data into test and training sets\nur_train, ur_test, rf_train, rf_test = split_to_train_test(user_rating, recipe_feature, .2)\n\nur_train_indices = 
list(zip(ur_train.nonzero()))\n\n# Create two loader objects for the training and test datasets\nbatch_size = 1000\ntrain_dataloader = DataLoader(RatingDataset(ur_train), batch_size=batch_size, shuffle=True)\ntest_dataloader = DataLoader(RatingDataset(ur_test), batch_size=batch_size, shuffle=True)\n\n# Instantiate the model\nmodel = CTRMF(\n\tuser_rating,\n\trecipe_feature,\n\tdevice=device,\n\tverbose=True\n).to(device)\n\n# Use MSE as the loss function\nloss_func = torch.nn.MSELoss()\n\n# Use SGD to optimize the weights\noptimizer = torch.optim.SGD(model.parameters(), lr=0.1)\n\n# Find non-zero indices to train on\nratings_users, ratings_recipes = ur_train.nonzero()\n\ntrain_mse = []\ntest_mse = []\n\nprint(f\"beginning training... batch size={batch_size}\")\n\n# Train model:\nfor epoch in range(args['epochs']):\n\ttrain_loss_tot = 0\n\ttrain_count = 0\n\tfor i, (row_batch, col_batch, rating_batch) in enumerate(train_dataloader):\n\t\ttrain_count += 1\n\t\toptimizer.zero_grad()\n\n\t\t# Predict rating and calculate loss\n\t\tprediction = model(row_batch.squeeze(), col_batch.squeeze())\n\t\tprediction = torch.diagonal(prediction)\n\t\tloss = loss_func(prediction, rating_batch.squeeze())\n\n\t\t# Backpropagate\n\t\tloss.backward()\n\n\t\t# Update the parameters\n\t\toptimizer.step()\n\n\t\t# Update loss total\n\t\ttrain_loss_tot += loss.item()\n\n\ttest_loss_tot = 0\n\ttest_count = 0\n\twith torch.no_grad():\n\t\tfor i, (row_batch, col_batch, rating_batch) in enumerate(test_dataloader):\n\t\t\ttest_count += 1\n\n\t\t\t# Predict rating and calculate loss\n\t\t\tprediction = model(row_batch.squeeze(), col_batch.squeeze())\n\t\t\tprediction = torch.diagonal(prediction)\n\t\t\tloss = loss_func(prediction, rating_batch.squeeze())\n\n\t\t\t# Update loss total\n\t\t\ttest_loss_tot += loss.item()\n\n\ttrain_mse += [train_loss_tot / train_count]\n\ttest_mse += [test_loss_tot / test_count]\n\tprint('[epoch:{}] Train MSE: {}, Test MSE: 
{}'.format(\n\t\tepoch,\n\t\ttrain_mse[-1],\n\t\ttest_mse[-1]\n\t))\n\nprint('Finished training!')\n#plot_learning_curve_2(list(range(args['epochs'])), train_mse, test_mse, args['plot'])\n\nif args['save'] is True:\n save_model(model, '../models/model.pkl')\n"
] |
[
[
"torch.LongTensor",
"torch.diagonal",
"torch.no_grad",
"torch.FloatTensor",
"torch.cuda.is_available",
"torch.nn.MSELoss"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bcha92/ML_Training_Template
|
[
"e76eeb58f440f3eb2fb35e2df87033e7473c6f91"
] |
[
"1S_regression_model_selection/decision_tree_regression.py"
] |
[
"# Decision Tree Regression\n\n# Importing the libraries\nimport numpy as np\n# no visual graph in this template, plt is commented out as such\n# import matplotlib.pyplot as plt\nimport pandas as pd\n\n# Importing the dataset\ndataset = pd.read_csv('Data.csv')\nX = dataset.iloc[:, :-1].values\ny = dataset.iloc[:, -1].values\n\n# Splitting the dataset into the Training set and Test set\nfrom sklearn.model_selection import train_test_split\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)\n\n# Training the Decision Tree Regression model on the Training set\nfrom sklearn.tree import DecisionTreeRegressor\nregressor = DecisionTreeRegressor(random_state = 0)\nregressor.fit(X_train, y_train)\n\n# Predicting the Test set results\ny_pred = regressor.predict(X_test)\nnp.set_printoptions(precision=2)\nprint(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))\n\n# Evaluating the Model Performance\nfrom sklearn.metrics import r2_score\nr2_score(y_test, y_pred)"
] |
[
[
"pandas.read_csv",
"sklearn.metrics.r2_score",
"sklearn.tree.DecisionTreeRegressor",
"numpy.set_printoptions",
"sklearn.model_selection.train_test_split"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
Stranger469/ARS2
|
[
"b6d62b66997180abe01b676d5359c20daa42b7ad",
"b6d62b66997180abe01b676d5359c20daa42b7ad"
] |
[
"wrench/classification/denoise.py",
"wrench/labelmodel/gold.py"
] |
[
"import logging\nfrom typing import Any, Optional, Union, Callable\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.utils.data import DataLoader\nfrom tqdm.auto import trange\nfrom transformers import AutoTokenizer\n\nfrom ..backbone import BackBone\nfrom ..basemodel import BaseTorchClassModel, BaseLabelModel\nfrom ..config import Config\nfrom ..dataset import BaseDataset\nfrom ..dataset.utils import split_labeled_unlabeled\n\nlogger = logging.getLogger(__name__)\n\n\nclass AttentionModel(nn.Module):\n def __init__(self, input_size, n_rules, hidden_size, n_class):\n super(AttentionModel, self).__init__()\n self.n_class = n_class\n self.encoder = nn.Sequential(\n nn.Linear(input_size, hidden_size),\n nn.Tanh(),\n nn.Linear(hidden_size, n_rules)\n )\n\n def forward(self, x_lf, batch):\n x_l = batch['features'].to(x_lf.device)\n x = torch.cat((x_lf, x_l), 1)\n z = self.encoder(x)\n score = F.softmax(z, dim=1)\n mask = (x_lf >= 0).float()\n coverage_score = score * mask\n\n score_matrix = torch.empty(len(x_lf), self.n_class, device=x_lf.device)\n for k in range(self.n_class):\n score_matrix[:, k] = (score * (x_lf == k).float()).sum(dim=1)\n\n return score_matrix, coverage_score\n\n\nclass AssembleModel(BackBone):\n def __init__(self, input_size, n_rules, hidden_size, n_class, backbone):\n super(AssembleModel, self).__init__(n_class=n_class)\n self.backbone = backbone\n self.attention = AttentionModel(input_size + n_rules, n_rules, hidden_size, n_class)\n\n def forward(self, batch_l, batch_u, x_lf_l, x_lf_u):\n predict_l = self.backbone(batch_l)\n predict_u = self.backbone(batch_u)\n\n lf_y_l, all_scores = self.attention(x_lf_l, batch_l)\n fix_score = F.softmax(torch.mean(all_scores, dim=0), dim=0) # use the average as the fixed score\n\n lf_y_u = torch.zeros((x_lf_u.size(0), self.n_class), dtype=torch.float, device=self.get_device())\n for k in range(self.n_class):\n lf_y_u[:, k] = 
(fix_score.unsqueeze(0).repeat([x_lf_u.size(0), 1]) * (x_lf_u == k).float()).sum(dim=1)\n lf_y_u /= torch.sum(lf_y_u, dim=1).unsqueeze(1)\n lf_y_u = torch.nan_to_num(lf_y_u) # handle the 'nan' (divided by 0) problem\n lf_y_u = F.log_softmax(lf_y_u, dim=1).detach()\n\n return predict_l, predict_u, lf_y_l, lf_y_u, fix_score.detach()\n\n\nclass Denoise(BaseTorchClassModel):\n def __init__(self,\n alpha: Optional[float] = 0.6,\n c1: Optional[float] = 0.2,\n c2: Optional[float] = 0.7,\n hidden_size: Optional[int] = 100,\n\n batch_size: Optional[int] = 16,\n real_batch_size: Optional[int] = 16,\n test_batch_size: Optional[int] = 16,\n n_steps: Optional[int] = 10000,\n grad_norm: Optional[float] = -1,\n use_lr_scheduler: Optional[bool] = False,\n binary_mode: Optional[bool] = False,\n **kwargs: Any\n ):\n super().__init__()\n self.hyperparas = {\n 'alpha' : alpha,\n 'c1' : c1,\n 'c2' : c2,\n 'hidden_size' : hidden_size,\n\n 'batch_size' : batch_size,\n 'real_batch_size' : real_batch_size,\n 'test_batch_size' : test_batch_size,\n 'n_steps' : n_steps,\n 'grad_norm' : grad_norm,\n 'use_lr_scheduler': use_lr_scheduler,\n 'binary_mode' : binary_mode,\n }\n self.model: Optional[AssembleModel] = None\n self.label_model: Optional[BaseLabelModel] = None\n self.config = Config(\n self.hyperparas,\n use_optimizer=True,\n use_lr_scheduler=use_lr_scheduler,\n use_backbone=True,\n use_label_model=True,\n **kwargs\n )\n self.is_bert = self.config.backbone_config['name'] == 'BERT'\n if self.is_bert:\n self.tokenizer = AutoTokenizer.from_pretrained(self.config.backbone_config['paras']['model_name'])\n\n def fit(self,\n dataset_train: BaseDataset,\n dataset_valid: Optional[BaseDataset] = None,\n y_valid: Optional[np.ndarray] = None,\n cut_tied: Optional[bool] = False,\n valid_mode: Optional[str] = 'feature',\n evaluation_step: Optional[int] = 100,\n metric: Optional[Union[str, Callable]] = 'acc',\n direction: Optional[str] = 'auto',\n patience: Optional[int] = 20,\n tolerance: 
Optional[float] = -1.0,\n device: Optional[torch.device] = None,\n verbose: Optional[bool] = True,\n **kwargs: Any):\n\n if not verbose:\n logger.setLevel(logging.ERROR)\n\n config = self.config.update(**kwargs)\n hyperparas = self.config.hyperparas\n logger.info(config)\n\n n_steps = hyperparas['n_steps']\n if hyperparas['real_batch_size'] == -1 or hyperparas['batch_size'] < hyperparas['real_batch_size'] or not self.is_bert:\n hyperparas['real_batch_size'] = hyperparas['batch_size']\n accum_steps = hyperparas['batch_size'] // hyperparas['real_batch_size']\n\n alpha, c1, c2 = hyperparas['alpha'], hyperparas['c1'], hyperparas['c2']\n\n n_rules = dataset_train.n_lf\n n_class = dataset_train.n_class\n\n backbone = self._init_model(\n dataset=dataset_train,\n n_class=dataset_train.n_class,\n config=config,\n is_bert=self.is_bert\n )\n model = AssembleModel(\n input_size=dataset_train.features.shape[1],\n n_rules=n_rules,\n hidden_size=hyperparas['hidden_size'],\n n_class=n_class,\n backbone=backbone\n )\n self.model = model.to(device)\n\n optimizer, scheduler = self._init_optimizer_and_lr_scheduler(model, config)\n\n labeled_dataset, unlabeled_dataset = split_labeled_unlabeled(dataset_train, cut_tied=cut_tied)\n labeled_dataloader = self._init_train_dataloader(\n labeled_dataset,\n n_steps=n_steps,\n config=config,\n return_features=True,\n return_weak_labels=True,\n )\n\n unlabeled_dataloader = self._init_train_dataloader(\n unlabeled_dataset,\n n_steps=n_steps,\n config=config,\n return_features=True,\n return_weak_labels=True,\n )\n\n label_model = self._init_label_model(config)\n label_model.fit(dataset_train=dataset_train, dataset_valid=dataset_valid, verbose=verbose)\n self.label_model = label_model\n all_y_l = torch.LongTensor(label_model.predict(labeled_dataset)).to(device)\n all_Z = torch.zeros(len(unlabeled_dataset), n_class, dtype=torch.float).to(device)\n all_z = torch.zeros(len(unlabeled_dataset), n_class, dtype=torch.float).to(device)\n\n valid_flag = 
self._init_valid_step(\n dataset_valid,\n y_valid,\n metric,\n direction,\n patience,\n tolerance,\n return_features=True,\n return_weak_labels=True,\n )\n\n history = {}\n last_step_log = {}\n try:\n with trange(n_steps, desc=\"[TRAIN] Denoise\", unit=\"steps\", disable=not verbose, ncols=150, position=0, leave=True) as pbar:\n cnt = 0\n step = 0\n model.train()\n optimizer.zero_grad()\n for labeled_batch, unlabeled_batch in zip(labeled_dataloader, unlabeled_dataloader):\n\n x_lf_l = labeled_batch['weak_labels'].to(device)\n x_lf_u = unlabeled_batch['weak_labels'].to(device)\n idx_l = labeled_batch['ids'].long().to(device)\n idx_u = unlabeled_batch['ids'].long().to(device)\n y_l = all_y_l.index_select(0, idx_l)\n Z = all_Z.index_select(0, idx_u)\n z = all_z.index_select(0, idx_u)\n\n predict_l, predict_u, lf_y_l, lf_y_u, fix_score = model(labeled_batch, unlabeled_batch, x_lf_l, x_lf_u)\n\n loss_sup = F.cross_entropy(predict_l, y_l)\n loss_sup_weight = F.cross_entropy(lf_y_l, y_l)\n\n loss_unsup = torch.FloatTensor([0.0]).to(device)\n if step > 100:\n loss_unsup = ((z - predict_u) ** 2).mean()\n outputs = predict_u.data.clone()\n Z = alpha * Z + (1. - alpha) * outputs\n all_z[idx_u] = Z * (1. / (1. 
- alpha ** (step + 1)))\n all_Z[idx_u] = Z\n\n loss = c1 * loss_sup_weight + c2 * loss_sup + (1 - c2 - c1) * loss_unsup\n loss.backward()\n cnt += 1\n\n if cnt % accum_steps == 0:\n if hyperparas['grad_norm'] > 0:\n nn.utils.clip_grad_norm_(model.parameters(), hyperparas['grad_norm'])\n optimizer.step()\n if scheduler is not None:\n scheduler.step()\n optimizer.zero_grad()\n step += 1\n\n # --- update y_l ---\n lf_y_l_new = torch.zeros((x_lf_l.size(0), n_class), dtype=torch.float).to(device)\n for k in range(n_class):\n lf_y_l_new[:, k] = (fix_score.unsqueeze(0).repeat([x_lf_l.size(0), 1]) * (x_lf_l == k).float()).sum(dim=1)\n lf_y_l_new /= torch.sum(lf_y_l_new, dim=1).unsqueeze(1)\n lf_y_l_new[lf_y_l_new != lf_y_l_new] = 0 # handle the 'nan' (divided by 0) problem\n lf_y_l_new = F.log_softmax(lf_y_l_new, dim=1).detach()\n all_y_l[idx_l] = lf_y_l_new.max(1)[1]\n\n if valid_flag and step % evaluation_step == 0:\n metric_value, early_stop_flag, info = self._valid_step(step, mode=valid_mode)\n if early_stop_flag:\n logger.info(info)\n break\n\n history[step] = {\n 'loss' : loss.item(),\n 'loss_sup' : loss_sup.item(),\n 'loss_sup_weight' : loss_sup_weight.item(),\n 'loss_unsup' : loss_unsup.item(),\n f'val_{metric}' : metric_value,\n f'best_val_{metric}': self.best_metric_value,\n 'best_step' : self.best_step,\n }\n last_step_log.update(history[step])\n\n last_step_log['loss'] = loss.item()\n last_step_log['loss_sup'] = loss_sup.item()\n last_step_log['loss_sup_weight'] = loss_sup_weight.item()\n last_step_log['loss_unsup'] = loss_unsup.item()\n pbar.update()\n pbar.set_postfix(ordered_dict=last_step_log)\n\n if step >= n_steps:\n break\n\n except KeyboardInterrupt:\n logger.info(f'KeyboardInterrupt! 
do not terminate the process in case need to save the best model')\n\n self._finalize()\n\n return history\n\n @torch.no_grad()\n def predict_proba(self, dataset: Union[BaseDataset, DataLoader], mode: Optional[str] = 'feature',\n device: Optional[torch.device] = None, **kwargs: Any):\n assert mode in ['ensemble', 'feature', 'rules'], f'mode: {mode} not support!'\n if device is not None:\n model = self.model.to(device)\n else:\n model = self.model\n device = model.get_device()\n model.eval()\n if isinstance(dataset, BaseDataset):\n valid_dataloader = self._init_valid_dataloader(\n dataset,\n return_features=True,\n return_weak_labels=True,\n )\n else:\n valid_dataloader = dataset\n probas = []\n for batch in valid_dataloader:\n if mode == 'ensemble':\n output1, x = model.backbone(batch, return_features=True)\n prob_feature = F.softmax(output1, dim=-1)\n x_lf = batch['weak_labels'].to(device)\n output2, _ = model.attention(x_lf, batch)\n prob_weak_labels = F.softmax(output2, dim=-1)\n\n max_prob_feature = torch.max(prob_feature, dim=-1)[0]\n max_prob_weak_labels = torch.max(prob_weak_labels, dim=-1)[0]\n mask = torch.unsqueeze((max_prob_feature > max_prob_weak_labels).long(), dim=1)\n\n proba = mask * prob_feature + (1 - mask) * prob_weak_labels\n elif mode == 'feature':\n output1 = model.backbone(batch)\n proba = F.softmax(output1, dim=-1)\n elif mode == 'rules':\n _, x = model.backbone(batch, return_features=True)\n x_lf = batch['weak_labels'].to(device)\n output2, _ = model.attention(x_lf, x)\n proba = F.softmax(output2, dim=-1)\n else:\n raise NotImplementedError\n\n probas.append(proba.cpu().numpy())\n\n return np.vstack(probas)\n",
"import logging\nfrom collections import Counter\nfrom typing import Any, Optional, Union\n\nimport numpy as np\nfrom snorkel.labeling import LFAnalysis\n\nfrom ..basemodel import BaseLabelModel\nfrom ..dataset import BaseDataset\nfrom ..dataset.utils import check_weak_labels\n\nlogger = logging.getLogger(__name__)\n\nABSTAIN = -1\n\n\nclass GoldCondProb(BaseLabelModel):\n def __init__(self, **kwargs: Any):\n super().__init__()\n self.cond_probs = None\n self.n_class = None\n self.balance = None\n\n def fit(self,\n dataset_train: BaseDataset,\n dataset_valid: Optional[BaseDataset] = None,\n use_prior: Optional[bool] = True,\n **kwargs: Any):\n if np.all(np.array(dataset_train.labels) == -1):\n dataset_train = dataset_valid\n L = check_weak_labels(dataset_train)\n Y = np.array(dataset_train.labels)\n class_counts = Counter(Y)\n sorted_counts = np.array([v for k, v in sorted(class_counts.items())])\n self.n_class = len(sorted_counts)\n if use_prior:\n self.balance = sorted_counts / sum(sorted_counts)\n else:\n self.balance = np.ones(self.n_class)\n self.cond_probs = LFAnalysis(L).lf_empirical_probs(Y, self.n_class)\n\n def predict_proba(self, dataset: Union[BaseDataset, np.ndarray], weight: Optional[np.ndarray] = None,\n **kwargs: Any) -> np.ndarray:\n L = check_weak_labels(dataset)\n n, m = L.shape\n L_shift = L + 1 # convert to {0, 1, ..., k}\n L_aug = np.zeros((n, m * self.n_class))\n for y in range(1, self.n_class + 1):\n # A[x::y] slices A starting at x at intervals of y\n # e.g., np.arange(9)[0::3] == np.array([0,3,6])\n L_aug[:, (y - 1):: self.n_class] = np.where(L_shift == y, 1, 0)\n mu = self.cond_probs[:, 1:, :].reshape(-1, self.n_class)\n mu_eps = min(0.01, 1 / 10 ** np.ceil(np.log10(n)))\n mu = np.clip(mu, mu_eps, 1 - mu_eps)\n jtm = np.ones(L_aug.shape[1])\n # Note: We omit abstains, effectively assuming uniform distribution here\n X = np.exp(L_aug @ np.diag(jtm) @ np.log(mu) + np.log(self.balance))\n Z = np.tile(X.sum(axis=1).reshape(-1, 1), 
self.n_class)\n return X / Z\n"
] |
[
[
"torch.mean",
"torch.nn.functional.softmax",
"torch.max",
"torch.nn.functional.log_softmax",
"torch.cat",
"torch.nn.functional.cross_entropy",
"torch.nan_to_num",
"torch.sum",
"torch.nn.Tanh",
"torch.nn.Linear",
"torch.no_grad",
"torch.FloatTensor",
"numpy.vstack"
],
[
"numpy.diag",
"numpy.log",
"numpy.clip",
"numpy.ones",
"numpy.log10",
"numpy.array",
"numpy.zeros",
"numpy.where"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
dvp2015/nuclide-data
|
[
"6f33831220b5f9cb9e82bb7917bfb53dee2c8bc3"
] |
[
"test_nuclide_data.py"
] |
[
"#!/usr/bin/env python\n\"\"\"\nTests for nuclide_data\n\n\"\"\"\n\nimport numpy as np\nimport uncertainties as unc\nimport unittest\nimport nuclide_data\n\nclass TestNuclideData(unittest.TestCase):\n\n def test_isotopes(self):\n \"\"\"Do we have the correct isotopes for select elements?\"\"\"\n\n zs = [1, 8, 56, 95]\n isos = list(map(list, [range(1, 8),\n range(12, 29),\n range(112, 154),\n range(230, 250),]))\n for z, ref_iso in zip(zs, isos):\n iso = nuclide_data.isotopes[z]\n assert ref_iso == iso\n\n\n def test_isomers(self):\n \"\"\"Do we have the correct isomers for select nuclides?\"\"\"\n\n nuclides = [ (4,10), (7,14), (52,115), (81,185), (115,290) ]\n isomer_states = [ [0., 3.3680, 5.9584, 6.1793],\n [0., 8.49, 8.964, 9.129],\n [0., 0.02, 0.2801],\n [0., 0.4548],\n [0.,] ]\n\n for n, ref_isomers in zip(nuclides, isomer_states):\n isomers = nuclide_data.isomers(*n)\n assert ref_isomers == isomers\n\n def ufloat_equiv(self, a, b):\n try:\n return ( np.allclose([a.nominal_value], [b.nominal_value])\n and np.allclose([a.std_dev], [b.std_dev]) )\n except AttributeError:\n return a == b\n #return np.allclose([a], [b])\n\n def test_data(self):\n \"\"\"\n Do we have the correct weight, abundance, and half-life for select nuclides?\n \"\"\"\n nuclides = [ (1,1), (19, 49), (63, 148), (96, 240) ]\n weights = [ unc.ufloat_fromstr(\"1.00782503207(10)\"),\n unc.ufloat_fromstr(\"48.967450(80)\"),\n unc.ufloat_fromstr(\"147.918086(11)\"),\n unc.ufloat_fromstr(\"240.0555295(25)\"), ]\n abundances = [ unc.ufloat_fromstr(\"0.999885(70)\"), 0., 0., 0. 
]\n half_lifes = [ np.inf, 1.26E+00, 4.71E+06, 2.33E+06]\n stable = [True, False, False, False]\n\n for i in range(len(nuclides)):\n d = nuclide_data.nuc(*nuclides[i])\n\n assert self.ufloat_equiv(d['weight'], weights[i])\n assert self.ufloat_equiv(d['abundance'], abundances[i])\n assert d['half-life'] == half_lifes[i]\n assert d['stable'] == stable[i]\n\n\n def test_weight(self):\n \"\"\"\n Does weight() function work as expected?\n \"\"\"\n symbols = [ 'H', 'K', 'eu', 'CM' ]\n nuclides = [ (1,1), (19, 49), (63, 148), (96, 240) ]\n weights = [ 1.00782503207, 48.967450, 147.918086, 240.0555295, ]\n\n for i in range(len(nuclides)):\n\n assert weights[i] == nuclide_data.weight(*nuclides[i])\n assert weights[i] == nuclide_data.weight(symbols[i], nuclides[i][1])\n assert weights[i] == nuclide_data.weight(\n \"{0}-{1}\".format(symbols[i], nuclides[i][1]))\n\n\n def test_zaids(self):\n \"\"\"Does zaid conversion work correctly?\"\"\"\n\n zaids = [92235, 3006, \"54135\", \"08016\"]\n zas = [(92,235), (3,6), (54, 135), (8, 16)]\n for zaid, ref_za in zip(zaids, zas):\n za = nuclide_data.zaid2za(zaid)\n assert ref_za == za\n\n def test_Nuclide_class_init(self):\n \"\"\"Does Nuclide class correctly identify nuclide for a variety of inputs?\"\"\"\n\n class Foo:\n pass\n\n nuc_obj = Foo()\n nuc_obj.Z = 92\n nuc_obj.A = 235\n\n nuc_ids = [ 'U235', 'U-235', '235U', '235-U',\n 'u235', 'u-235', '235u', '235-u',\n 92235, \"92235\",\n (92,235), [92, 235],\n {'Z':92, 'A':235},\n nuc_obj\n ]\n\n for nuc_id in nuc_ids:\n nuclide = nuclide_data.Nuclide(nuc_id)\n\n assert nuclide.Z == 92\n assert nuclide.A == 235\n assert nuclide.element == 'U'\n assert nuclide.metastable == False\n assert nuclide.E == 0.\n\n def test_Nuclide_class_init_2(self):\n \"\"\"Does Nuclide class correctly identify nuclide for a variety of inputs?\"\"\"\n\n class Foo:\n pass\n\n nuc_obj = Foo()\n nuc_obj.Z = 3\n nuc_obj.A = 6\n\n nuc_ids = [ 'Li6', 'LI-6', '6LI', '6-Li',\n 'li6', 'lI-6', '6li', '6-li',\n 3006, 
\"03006\",\n (3,6), [3, 6],\n {'Z': 3, 'A':6},\n nuc_obj\n ]\n\n for nuc_id in nuc_ids:\n nuclide = nuclide_data.Nuclide(nuc_id)\n\n assert nuclide.Z == 3\n assert nuclide.A == 6\n assert nuclide.element == 'Li'\n assert nuclide.metastable == False\n assert nuclide.E == 0.\n\n\n def test_Nuclide_class_init_with_E(self):\n \"\"\"Does Nuclide class correctly get isomeric energy?\"\"\"\n E_ref = 0.2283\n\n class Foo:\n pass\n\n nuc_obj = Foo()\n nuc_obj.Z = 13\n nuc_obj.A = 26\n nuc_obj.E = E_ref\n\n nuc_ids = [ ('Al26', E_ref), (13026, E_ref),\n ((13, 26, E_ref),), ([13, 26, E_ref],),\n ({'Z': 13, 'A': 26, 'E': E_ref},),\n (nuc_obj,)\n ]\n\n for nuc_id in nuc_ids:\n nuclide = nuclide_data.Nuclide(*nuc_id)\n\n # Primary check\n assert np.allclose([nuclide.E,], [E_ref])\n\n # Secondary check\n assert nuclide.Z == 13\n assert nuclide.A == 26\n assert nuclide.element == 'Al'\n assert nuclide.metastable == True\n\n\n def test_Nuclide_class_init_with_metastable(self):\n \"\"\"Does Nuclide class correctly understand metastable notation?\"\"\"\n E_ref = 0.5\n\n nuclide = nuclide_data.Nuclide('Li6m', E_ref)\n\n # Primary check\n assert np.allclose([nuclide.E,], [E_ref])\n assert nuclide.Z == 3\n assert nuclide.A == 6\n assert nuclide.element == 'Li'\n assert nuclide.metastable == True\n\n\n nuclide = nuclide_data.Nuclide('LI-6M')\n\n # Primary check\n assert nuclide.E is np.inf\n assert nuclide.Z == 3\n assert nuclide.A == 6\n assert nuclide.element == 'Li'\n assert nuclide.metastable == True\n\n nuclide = nuclide_data.Nuclide((3,6), metastable=True)\n\n # Primary check\n assert nuclide.E is np.inf\n assert nuclide.Z == 3\n assert nuclide.A == 6\n assert nuclide.element == 'Li'\n assert nuclide.metastable == True\n\n\n def test_Nuclide_class_init_with_alternate_metastable(self):\n\n alternate_zaid = nuclide_data.Nuclide('Li6').zaid() + 400\n nuclide = nuclide_data.Nuclide(alternate_zaid)\n\n assert nuclide.Z == 3\n assert nuclide.A == 6\n assert nuclide.element == 'Li'\n 
assert nuclide.metastable == True\n assert nuclide.zaid() == 3006\n assert nuclide.zaid(alternate=True) == 3406\n\n\n def test_Nuclide_class_MAT(self):\n \"\"\"Does Nuclide class correctly set MAT?\"\"\"\n\n nuc_ids = {\n 'H - 1 ' : 125,\n 'He- 4 ' : 228,\n 'N - 15 ' : 728,\n 'O - 16 ' : 825,\n 'Si- 29 ' : 1428,\n 'Ca- 44 ' : 2037,\n 'Sc- 45 ' : 2125,\n 'Fe- 54 ' : 2625,\n 'Co- 58 ' : 2722,\n 'Co- 58M' : 2723,\n 'Se- 82 ' : 3449,\n 'Sr- 87 ' : 3834,\n 'Ag-109 ' : 4731,\n 'Ag-110M' : 4735,\n 'Cd-115M' : 4853,\n 'Sn-112 ' : 5025,\n 'Gd-152 ' : 6425,\n 'Ra-226 ' : 8834,\n 'U -235 ' : 9228,\n 'Am-242 ' : 9546,\n 'Am-242M' : 9547,\n 'Am-243 ' : 9549,\n 'Am-244 ' : 9552,\n 'Am-244M' : 9553,\n 'Cm-240 ' : 9625,\n 'Cm-241 ' : 9628,\n 'Bk-246 ' : 9743,\n 'Cf-254 ' : 9867,\n 'Es-253 ' : 9913,\n 'Es-254 ' : 9914,\n 'Es-254M' : 9915,\n 'Es-255 ' : 9916,\n 'Fm-255 ' : 9936,\n }\n\n for nuc_id in nuc_ids:\n assert nuclide_data.Nuclide(nuc_id).mat == nuc_ids[nuc_id]\n\n\nif __name__ == '__main__':\n unittest.main()\n"
] |
[
[
"numpy.allclose"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
slowy07/tf-quant-finance
|
[
"0976f720fb58a2d7bfd863640c12a2425cd2f94f",
"0976f720fb58a2d7bfd863640c12a2425cd2f94f",
"9372eb1ddf2b48cb1a3d4283bc67a10647ddc7a6",
"0976f720fb58a2d7bfd863640c12a2425cd2f94f",
"9372eb1ddf2b48cb1a3d4283bc67a10647ddc7a6"
] |
[
"tf_quant_finance/experimental/local_stochastic_volatility/local_stochastic_volatility_model_test.py",
"tf_quant_finance/black_scholes/variance_swaps_test.py",
"tf_quant_finance/experimental/instruments/cap_floor_test.py",
"tf_quant_finance/math/root_search/__init__.py",
"tf_quant_finance/experimental/pricing_platform/framework/rate_instruments/cashflow_streams.py"
] |
[
"# Lint as: python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for the Local stochastic volatility model.\"\"\"\n\nfrom absl.testing import parameterized\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nimport tf_quant_finance as tff\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\n\nbs = tff.black_scholes\nlsv = tff.experimental.local_stochastic_volatility\nvolatility_surface = tff.experimental.pricing_platform.framework.market_data.volatility_surface\n\n\n# This function can't be moved to SetUp since that would break graph mode\n# execution\ndef build_tensors(dim, spot, risk_free_rate):\n year = [[2021, 2022]] * dim\n month = [[1, 1]] * dim\n day = [[1, 1]] * dim\n expiries = tff.datetime.dates_from_year_month_day(year, month, day)\n valuation_date = [(2020, 1, 1)]\n expiry_times = tff.datetime.daycount_actual_365_fixed(\n start_date=valuation_date, end_date=expiries, dtype=tf.float64)\n moneyness = [[[0.1, 0.9, 1.0, 1.1, 3], [0.1, 0.9, 1.0, 1.1, 3]]] * dim\n strikes = spot * np.array(moneyness) * np.exp(\n risk_free_rate * np.array([[1.0], [2.0]]))\n iv = [[[0.135, 0.12, 0.1, 0.11, 0.13], [0.135, 0.12, 0.1, 0.11, 0.13]]] * dim\n return valuation_date, expiries, expiry_times, strikes, iv\n\n\ndef build_volatility_surface(val_date, expiry_times, expiries, strikes, iv,\n dtype):\n interpolator = tff.math.interpolation.interpolation_2d.Interpolation2D(\n 
expiry_times, strikes, iv, dtype=dtype)\n\n def _interpolator(t, x):\n x_transposed = tf.transpose(x)\n t = tf.broadcast_to(t, x_transposed.shape)\n return tf.transpose(interpolator.interpolate(t, x_transposed))\n\n return volatility_surface.VolatilitySurface(\n val_date, expiries, strikes, iv, interpolator=_interpolator, dtype=dtype)\n\n\n# @test_util.run_all_in_graph_and_eager_modes\nclass LocalStochasticVolatilityTest(tf.test.TestCase, parameterized.TestCase):\n\n def get_implied_vol(self, time, strike, paths, spot, r, dtype):\n r = tf.convert_to_tensor(r, dtype=dtype)\n discount_factor = tf.math.exp(-r * time)\n paths = tf.boolean_mask(paths, tf.math.logical_not(tf.math.is_nan(paths)))\n option_value = tf.math.reduce_mean(tf.nn.relu(paths - strike))\n iv = bs.implied_vol(\n prices=discount_factor * option_value,\n strikes=strike,\n expiries=time,\n spots=spot,\n discount_factors=discount_factor,\n dtype=dtype,\n validate_args=True)\n return iv\n\n @parameterized.named_parameters(\n ('1d', 1, 0.0, [0.0], [1.0], [1.0], 0.1, 0.1, 0.0, 0.2, True),\n ('1d_corr', 1, -0.5, [0.0], [1.0], [1.0], 0.1, 0.1, 0.0, 0.2, True),\n ('1d_nonzero_rate', 1, 0.0, [0.05], [1.0], [1.0\n ], 0.1, 0.1, 0.0, 0.2, True),\n ('1d_low_var', 1, 0.0, [0.0], [1.0], [0.04], 0.1, 0.1, 0.0, 0.2, True),\n ('1d_high_volvol', 1, 0.0, [0.0], [1.0], [0.04\n ], 0.1, 0.1, 1.0, 0.5, True),\n ('1d_using_vol_surface', 1, 0.0, [0.0], [1.0], [1.0], 0.1, 0.1, 0.0, 0.2,\n False),\n )\n def test_lv_correctness(self, dim, rho, risk_free_rate, spot, variance,\n pde_time_step, sim_time_step, mr, volvol,\n using_market_data):\n \"\"\"Tests that the model reproduces implied volatility smile.\"\"\"\n dtype = tf.float64\n num_samples = 50000\n var_model = lsv.LSVVarianceModel(\n mr, variance, volvol * np.sqrt(variance), dtype=dtype)\n val_date, expiries, expiry_times, strikes, iv = build_tensors(\n dim, spot, risk_free_rate)\n if using_market_data:\n model = lsv.LocalStochasticVolatilityModel.from_market_data(\n 
val_date,\n expiries,\n strikes,\n iv,\n var_model,\n spot,\n variance,\n rho,\n risk_free_rate, [0.0],\n pde_time_step,\n 200,\n dtype=dtype)\n else:\n vs = build_volatility_surface(\n val_date, expiry_times, expiries, strikes, iv, dtype=dtype)\n model = lsv.LocalStochasticVolatilityModel.from_volatility_surface(\n vs,\n var_model,\n spot,\n variance,\n rho,\n risk_free_rate, [0.0],\n pde_time_step,\n 200,\n dtype=dtype)\n\n paths = model.sample_paths(\n [1.0, 2.0],\n num_samples=num_samples,\n initial_state=[spot[0], variance[0]],\n time_step=sim_time_step,\n random_type=tff.math.random.RandomType.STATELESS_ANTITHETIC,\n seed=[1, 2])\n\n for d in range(dim):\n for i in range(2):\n for j in [1, 2, 3]:\n sim_iv = self.evaluate(\n self.get_implied_vol(expiry_times[d][i], strikes[d][i][j],\n paths[:, i,\n d], spot[d], risk_free_rate, dtype))\n self.assertAllClose(sim_iv[0], iv[d][i][j], atol=0.007, rtol=0.007)\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Lint as: python3\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for variance_swaps.\"\"\"\n\nfrom absl.testing import parameterized\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nimport tf_quant_finance as tff\nfrom tf_quant_finance.black_scholes import variance_swaps\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass VarianceSwapsTest(parameterized.TestCase, tf.test.TestCase):\n \"\"\"Tests for the variance_swaps module.\"\"\"\n\n @parameterized.named_parameters(\n {\n 'testcase_name': 'Puts',\n 'strikes': np.array([100.0, 90.0, 80.0, 70.0])\n }, {\n 'testcase_name': 'Calls',\n 'strikes': np.array([100.0, 110.0, 120.0, 130.0])\n })\n def test_replicating_weights(self, strikes):\n \"\"\"Tests ability to match 'hand' calculated variance replicating weights.\"\"\"\n reference_strikes = 100.0\n delta_strike = 10.0\n expiries = 1.0\n # This is the value of (A 4) in Demeterfi et al.\n payoff_values = 2 * ((strikes - reference_strikes) / reference_strikes -\n np.log(strikes / reference_strikes))\n # This is the value of the ratio term in (A 7) in Demeterfi et al.\n slope_values = np.diff(payoff_values / delta_strike)\n # Literal calculation of (A 7/8) for all weights. 
The library uses\n # first differences rather than cumsums for efficiency due to algebra.\n expected_weights = []\n for v in slope_values:\n expected_weights.append(v - np.sum(expected_weights))\n weights = self.evaluate(\n variance_swaps.replicating_weights(\n strikes, reference_strikes, expiries, dtype=tf.float64))\n self.assertAllClose(weights, expected_weights, 1e-6)\n\n def test_replicating_weights_supports_batching(self):\n put_strikes = tf.constant([[100, 95, 90, 85]], dtype=np.float64)\n batch_put_strikes = batch_put_strikes = tf.concat(\n [put_strikes, put_strikes, 2 * (put_strikes - 100) + 100], axis=0)\n batch_reference = tf.math.reduce_max(batch_put_strikes, axis=1)\n batch_expiries = tf.constant([0.25, 0.5, 0.25], dtype=tf.float64)\n expected_shape = np.array(batch_put_strikes.shape)\n expected_shape[-1] = expected_shape[-1] - 1\n batch_weights = self.evaluate(\n variance_swaps.replicating_weights(\n batch_put_strikes,\n batch_reference,\n batch_expiries))\n self.assertAllEqual(batch_weights.shape, expected_shape)\n for i in range(3):\n row_weights = self.evaluate(\n variance_swaps.replicating_weights(\n batch_put_strikes[i, :], batch_reference[i], batch_expiries[i]))\n self.assertAllEqual(row_weights, batch_weights[i, :])\n\n def test_replicating_weights_raises_validation_error(self):\n strikes = np.array([1, 2, 3, 2, 1])\n reference_strike = 3\n expiry = 1\n with self.assertRaises(tf.errors.InvalidArgumentError):\n _ = self.evaluate(\n variance_swaps.replicating_weights(\n strikes,\n reference_strike,\n expiry,\n validate_args=True,\n dtype=tf.float64))\n\n @parameterized.named_parameters({\n 'testcase_name':\n 'Demeterfi_et_al',\n 'call_strikes':\n np.array([100., 105., 110., 115., 120., 125., 130., 135., 140.]),\n 'call_weights':\n np.array([19.63, 36.83, 33.55, 30.69, 28.19, 25.98, 24.02, 22.27]),\n 'call_volatilities':\n np.array([0.2, 0.19, 0.18, 0.17, 0.16, 0.15, 0.14, 0.13, np.nan]),\n 'put_strikes':\n np.array(\n [100., 95., 90., 85., 80., 
75., 70., 65., 60., 55., 50., 45.]),\n 'put_weights':\n np.array([\n 20.98, 45., 50.15, 56.23, 63.49, 72.26, 82.98, 96.27, 113.05,\n 134.63, 163.04\n ]),\n 'put_volatilities':\n np.array([\n 0.2, 0.21, 0.22, 0.23, 0.24, 0.25, 0.26, 0.27, 0.28, 0.29, 0.30,\n np.nan\n ]),\n 'reference_strikes':\n 100.0,\n 'expiries':\n 0.25,\n 'discount_rates':\n 0.05,\n # Paper rounds to 2 dp in places (and variably within columns elsewhere)\n 'tolerance':\n 1e-2,\n 'k_var':\n 0.20467**2, # Paper works on % scale.\n })\n def test_variance_swap_demeterfi_example(self, call_strikes, call_weights,\n call_volatilities, put_strikes,\n put_weights, put_volatilities,\n reference_strikes, expiries,\n discount_rates, tolerance, k_var):\n \"\"\"Tests ability to match 'hand' calculated variance replicating weights.\"\"\"\n # Paper quotes weights inflated to forward values.\n discount_factor = np.exp(discount_rates * expiries)\n calculated_call_weights = self.evaluate(\n variance_swaps.replicating_weights(\n call_strikes, reference_strikes, expiries, dtype=tf.float64))\n matched_call_weights = discount_factor * 100.0**2 * calculated_call_weights\n self.assertAllClose(matched_call_weights, call_weights, tolerance)\n calculated_put_weights = self.evaluate(\n variance_swaps.replicating_weights(\n put_strikes, reference_strikes, expiries, dtype=tf.float64))\n matched_put_weights = discount_factor * 100.0**2 * calculated_put_weights\n self.assertAllClose(matched_put_weights, put_weights, tolerance)\n variance_price = self.evaluate(\n tff.black_scholes.variance_swap_fair_strike(\n put_strikes,\n put_volatilities,\n call_strikes,\n call_volatilities,\n expiries,\n discount_rates,\n reference_strikes,\n reference_strikes,\n dtype=tf.float64))\n self.assertAllClose(variance_price, k_var, 1e-2)\n\n @parameterized.named_parameters(\n {\n 'testcase_name': 'with_validation',\n 'validate_args': True\n }, {\n 'testcase_name': 'without_validation',\n 'validate_args': False\n })\n def 
test_variance_swap_fair_strike_supports_batching(self, validate_args):\n dtype = tf.float64\n batch_call_strikes = tf.repeat(\n tf.expand_dims(tf.range(100, 120, 5, dtype=dtype), 0), 3, axis=0)\n batch_put_strikes = tf.repeat(\n tf.expand_dims(tf.range(100, 80, -5, dtype=dtype), 0), 3, axis=0)\n batch_vols = 0.2 * tf.ones((3, 4), dtype=dtype)\n batch_shape = (3,)\n reference_strikes = 100.0 * tf.ones(batch_shape, dtype=dtype)\n batch_expiries = tf.constant([0.25, 0.5, 1.0], dtype=dtype)\n discount_rates = 0.05 * tf.ones(batch_shape, dtype=dtype)\n batch_variance_price = self.evaluate(\n tff.black_scholes.variance_swap_fair_strike(\n batch_put_strikes,\n batch_vols,\n batch_call_strikes,\n batch_vols,\n batch_expiries,\n discount_rates,\n reference_strikes,\n reference_strikes,\n validate_args=validate_args,\n dtype=dtype))\n\n self.assertEqual(batch_variance_price.shape, batch_shape)\n for i in range(3):\n row_variance_price = self.evaluate(\n tff.black_scholes.variance_swap_fair_strike(\n batch_put_strikes[i, :],\n batch_vols[i, :],\n batch_call_strikes[i, :],\n batch_vols[i, :],\n batch_expiries[i],\n discount_rates[i],\n reference_strikes[i],\n reference_strikes[i],\n dtype=tf.float64))\n self.assertAllEqual(row_variance_price, batch_variance_price[i])\n\n def test_variance_swap_fair_strike_raises_validation_error(self):\n dtype = tf.float64\n # Mismatching shapes for strikes and vols.\n strikes = tf.ones((3, 2), dtype=dtype)\n vols = tf.ones((3, 4), dtype=dtype)\n reference_strike = 1.0\n discount_rate = 0.0\n expiry = 1.0\n with self.assertRaises(tf.errors.InvalidArgumentError):\n _ = self.evaluate(\n tff.black_scholes.variance_swap_fair_strike(\n strikes,\n vols,\n strikes,\n vols,\n expiry,\n discount_rate,\n reference_strike,\n reference_strike,\n validate_args=True,\n dtype=dtype))\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Lint as: python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for cap_floor.py.\"\"\"\n\nfrom absl.testing import parameterized\n\nimport numpy as np\nfrom numpy import testing as np_testing\nimport tensorflow.compat.v2 as tf\n\nimport tf_quant_finance as tff\nfrom tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import\ndates = tff.datetime\ninstruments = tff.experimental.instruments\n\n\n@test_util.run_all_in_graph_and_eager_modes\nclass CapFloorTest(tf.test.TestCase, parameterized.TestCase):\n\n def setUp(self):\n super(CapFloorTest, self).setUp()\n self.maturity_date = [(2022, 1, 15)]\n self.start_date = [(2021, 1, 15)]\n self.valuation_date = [(2021, 1, 1)]\n\n def get_market(self):\n val_date = dates.convert_to_date_tensor(self.valuation_date)\n curve_dates = val_date + dates.months([0, 3, 12, 24])\n reference_curve = instruments.RateCurve(\n curve_dates,\n np.array([0.005, 0.01, 0.015, 0.02], dtype=np.float64),\n valuation_date=val_date,\n dtype=np.float64)\n market = instruments.InterestRateMarket(\n reference_curve=reference_curve, discount_curve=reference_curve)\n return market\n\n @parameterized.named_parameters(\n ('DoublePrecision', np.float64),\n )\n def test_cap_correctness(self, dtype):\n notional = 100.0\n\n period3m = dates.months(3)\n\n cap = instruments.CapAndFloor(\n self.start_date,\n self.maturity_date,\n period3m,\n 0.005,\n 
daycount_convention=instruments.DayCountConvention.ACTUAL_365,\n notional=notional,\n dtype=dtype)\n\n price = self.evaluate(\n cap.price(\n self.valuation_date,\n self.get_market(),\n model=instruments.InterestRateModelType.LOGNORMAL_RATE,\n pricing_context=0.5))\n np_testing.assert_allclose(price, 1.0474063612452953, atol=1e-6)\n\n @parameterized.named_parameters(\n ('DoublePrecision', np.float64),\n )\n def test_floor_correctness(self, dtype):\n notional = 100.0\n period3m = dates.months(3)\n cap = instruments.CapAndFloor(\n self.start_date,\n self.maturity_date,\n period3m,\n 0.01, # since this is a floor, we use different strike\n daycount_convention=instruments.DayCountConvention.ACTUAL_365,\n notional=notional,\n is_cap=False,\n dtype=dtype)\n price = self.evaluate(\n cap.price(\n self.valuation_date,\n self.get_market(),\n model=instruments.InterestRateModelType.LOGNORMAL_RATE,\n pricing_context=0.5))\n np_testing.assert_allclose(price, 0.01382758837128641, atol=1e-6)\n\n @parameterized.named_parameters(\n ('DoublePrecision', np.float64),\n )\n def test_cap_many(self, dtype):\n notional = 100.\n batch_maturity_date = dates.convert_to_date_tensor([(2022, 1, 15),\n (2022, 1, 15)])\n batch_start_date = dates.convert_to_date_tensor([(2021, 1, 15),\n (2021, 1, 15)])\n batch_valuation_date = dates.convert_to_date_tensor([(2021, 1, 1)])\n\n period3m = dates.months(3)\n cap = instruments.CapAndFloor(\n batch_start_date,\n batch_maturity_date,\n period3m,\n [0.005, 0.01],\n daycount_convention=instruments.DayCountConvention.ACTUAL_365,\n notional=notional,\n dtype=dtype)\n price = self.evaluate(\n cap.price(\n batch_valuation_date,\n self.get_market(),\n model=instruments.InterestRateModelType.LOGNORMAL_RATE,\n pricing_context=0.5))\n np_testing.assert_allclose(price,\n [1.0474063612452953, 0.5656630014452084],\n atol=1e-6)\n\n @parameterized.named_parameters(\n ('DoublePrecision', np.float64),\n )\n def test_cap_reset(self, dtype):\n notional = 100.0\n 
maturity_date = dates.convert_to_date_tensor([(2022, 1, 15),\n (2022, 1, 15)])\n start_date = dates.convert_to_date_tensor([(2021, 1, 15),\n (2021, 1, 15)])\n valuation_date = dates.convert_to_date_tensor([(2021, 2, 1)])\n\n period3m = dates.months(3)\n cap = instruments.CapAndFloor(\n start_date,\n maturity_date,\n period3m,\n [0.005, 0.01],\n daycount_convention=instruments.DayCountConvention.ACTUAL_365,\n notional=notional,\n dtype=dtype)\n curve_valuation_date = dates.convert_to_date_tensor([(2021, 1, 1)])\n curve_dates = curve_valuation_date + dates.months([0, 3, 12, 24])\n reference_curve = instruments.RateCurve(\n curve_dates,\n np.array([0.005, 0.01, 0.015, 0.02], dtype=np.float64),\n valuation_date=curve_valuation_date,\n dtype=np.float64)\n market = instruments.InterestRateMarket(\n reference_curve=reference_curve,\n discount_curve=reference_curve,\n libor_rate=[0.006556, 0.006556])\n\n price = self.evaluate(\n cap.price(\n valuation_date,\n market,\n model=instruments.InterestRateModelType.LOGNORMAL_RATE,\n pricing_context=0.5))\n np_testing.assert_allclose(price,\n [0.9389714183634128, 0.5354250398709062],\n atol=1e-6)\n\n @parameterized.named_parameters(\n ('DoublePrecision', np.float64),\n )\n def test_cap_fwd_rate(self, dtype):\n notional = 100.0\n period3m = dates.months(3)\n cap = instruments.CapAndFloor(\n self.start_date,\n self.maturity_date,\n period3m,\n 0.005,\n daycount_convention=instruments.DayCountConvention.ACTUAL_365,\n notional=notional,\n dtype=dtype)\n fwd_rates = self.evaluate(\n cap._get_forward_rate(\n dates.convert_to_date_tensor(self.valuation_date),\n self.get_market()))\n print(fwd_rates)\n np_testing.assert_allclose(fwd_rates,\n [0.010966, 0.013824, 0.017164, 0.020266],\n atol=1e-6)\n\n @parameterized.named_parameters(\n ('DoublePrecision', np.float64),\n )\n def test_cap_price_lognormal_rate_model(self, dtype):\n notional = 100.0\n period3m = dates.months(3)\n cap = instruments.CapAndFloor(\n self.start_date,\n 
self.maturity_date,\n period3m,\n 0.005,\n daycount_convention=instruments.DayCountConvention.ACTUAL_365,\n notional=notional,\n dtype=dtype)\n price = self.evaluate(\n cap._price_lognormal_rate(\n dates.convert_to_date_tensor(self.valuation_date),\n self.get_market(),\n pricing_context=0.5))\n print(price)\n np_testing.assert_allclose(\n price, [0.146671, 0.218595, 0.303358, 0.378782], atol=1e-6)\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# Lint as: python3\n# Copyright 2021 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Ops related to root search.\"\"\"\n\n\nfrom tf_quant_finance.math.root_search.brent import brentq\nfrom tf_quant_finance.math.root_search.newton import root_finder as newton_root\n\nfrom tensorflow.python.util.all_util import remove_undocumented # pylint: disable=g-direct-tensorflow-import\n\n_allowed_symbols = [\n 'brentq',\n 'newton_root',\n]\n\nremove_undocumented(__name__, _allowed_symbols)\n",
"# Lint as: python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Cashflow streams objects.\"\"\"\n\nfrom typing import Optional, Tuple, Callable, Any, List, Union\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\nfrom tf_quant_finance import datetime as dateslib\nfrom tf_quant_finance.experimental.pricing_platform.framework.core import curve_types as curve_types_lib\nfrom tf_quant_finance.experimental.pricing_platform.framework.core import processed_market_data as pmd\nfrom tf_quant_finance.experimental.pricing_platform.framework.core import types\nfrom tf_quant_finance.experimental.pricing_platform.framework.market_data import rate_curve\nfrom tf_quant_finance.experimental.pricing_platform.framework.market_data import utils as market_data_utils\nfrom tf_quant_finance.experimental.pricing_platform.framework.rate_instruments import coupon_specs\nfrom tf_quant_finance.experimental.pricing_platform.instrument_protos import period_pb2\nfrom tf_quant_finance.math import pad\n\n\n_CurveType = curve_types_lib.CurveType\n\n\nclass FixedCashflowStream:\n \"\"\"Represents a batch of fixed stream of cashflows.\"\"\"\n\n def __init__(self,\n coupon_spec: coupon_specs.FixedCouponSpecs,\n discount_curve_type: Union[_CurveType, List[_CurveType]],\n start_date: types.DateTensor = None,\n end_date: types.DateTensor = None,\n discount_curve_mask: types.IntTensor = None,\n first_coupon_date: Optional[types.DateTensor] = None,\n 
penultimate_coupon_date: Optional[types.DateTensor] = None,\n schedule_fn: Optional[Callable[..., Any]] = None,\n schedule: Optional[types.DateTensor] = None,\n dtype: Optional[types.Dtype] = None,\n name: Optional[str] = None):\n \"\"\"Initializes a batch of fixed cashflow streams.\n\n Args:\n coupon_spec: An instance of `FixedCouponSpecs` specifying the\n details of the coupon payment for the cashflow stream.\n discount_curve_type: An instance of `CurveType` or a list of those.\n If supplied as a list and `discount_curve_mask` is not supplied,\n the size of the list should be the same as the number of priced\n instruments. Defines discount curves for the instruments.\n start_date: A `DateTensor` of `batch_shape` specifying the starting dates\n of the accrual of the first coupon of the cashflow stream. The shape of\n the input correspond to the number of streams being created.\n When passed as an integet `Tensor`, should be of shape\n `batch_shape + [3]` and contain `[year, month, day]` for each date.\n Either this of `schedule` should be supplied\n Default value: `None`\n end_date: A `DateTensor` of `batch_shape`specifying the end dates for\n accrual of the last coupon in each cashflow stream. The shape of the\n input should be the same as that of `start_date`.\n Either this of `schedule` should be supplied\n When passed as an integet `Tensor`, should be of shape\n `batch_shape + [3]` and contain `[year, month, day]` for each date.\n Default value: `None`\n discount_curve_mask: An optional integer `Tensor` of values ranging from\n `0` to `len(discount_curve_type) - 1` and of shape `batch_shape`.\n Identifies a mapping between `discount_curve_type` list and the\n underlying instruments.\n Default value: `None`.\n first_coupon_date: An optional `DateTensor` specifying the payment dates\n of the first coupon of the cashflow stream. Use this input for cashflows\n with irregular first coupon. 
Should be of the same shape as\n `start_date`.\n When passed as an integet `Tensor`, should be of shape\n `batch_shape + [3]` and contain `[year, month, day]` for each date.\n Default value: None which implies regular first coupon.\n penultimate_coupon_date: An optional `DateTensor` specifying the payment\n dates of the penultimate (next to last) coupon of the cashflow\n stream. Use this input for cashflows with irregular last coupon.\n Should be of the same shape as `end_date`.\n When passed as an integet `Tensor`, should be of shape\n `batch_shape + [3]` and contain `[year, month, day]` for each date.\n Default value: None which implies regular last coupon.\n schedule_fn: A callable that accepts `start_date`, `end_date`,\n `coupon_frequency`, `settlement_days`, `first_coupon_date`, and\n `penultimate_coupon_date` as `Tensor`s and returns coupon payment\n days.\n Default value: `None`.\n schedule: A `DateTensor` of coupon payment dates including the start and\n end dates of the cashflows.\n Default value: `None`.\n dtype: `tf.Dtype` of the input and output real `Tensor`s.\n Default value: None which maps to the default dtype inferred by\n TensorFlow.\n name: Python str. 
The name to give to the ops created by this class.\n Default value: `None` which maps to 'fixed_cashflow_stream'.\n \"\"\"\n self._name = name or \"fixed_cashflow_stream\"\n\n with tf.name_scope(self._name):\n curve_list = to_list(discount_curve_type)\n [\n self._discount_curve_type,\n self._mask\n ] = process_curve_types(curve_list, discount_curve_mask)\n\n if schedule is None:\n if (start_date is None) or (end_date is None):\n raise ValueError(\"If `schedule` is not supplied both \"\n \"`start_date` and `end_date` should be supplied\")\n if isinstance(start_date, tf.Tensor):\n self._start_date = dateslib.dates_from_tensor(\n start_date)\n else:\n self._start_date = dateslib.convert_to_date_tensor(\n start_date)\n if isinstance(start_date, tf.Tensor):\n self._end_date = dateslib.dates_from_tensor(\n end_date)\n else:\n self._end_date = dateslib.convert_to_date_tensor(\n end_date)\n self._first_coupon_date = first_coupon_date\n self._penultimate_coupon_date = penultimate_coupon_date\n if self._first_coupon_date is not None:\n if isinstance(start_date, tf.Tensor):\n self._first_coupon_date = dateslib.dates_from_tensor(\n first_coupon_date)\n else:\n self._first_coupon_date = dateslib.convert_to_date_tensor(\n first_coupon_date)\n if self._penultimate_coupon_date is not None:\n if isinstance(start_date, tf.Tensor):\n self._penultimate_coupon_date = dateslib.dates_from_tensor(\n penultimate_coupon_date)\n else:\n self._penultimate_coupon_date = dateslib.convert_to_date_tensor(\n penultimate_coupon_date)\n\n # Update coupon frequency\n coupon_frequency = _get_attr(coupon_spec, \"coupon_frequency\")\n if isinstance(coupon_frequency, period_pb2.Period):\n coupon_frequency = market_data_utils.get_period(\n _get_attr(coupon_spec, \"coupon_frequency\"))\n if isinstance(coupon_frequency, (list, tuple)):\n coupon_frequency = market_data_utils.period_from_list(\n *_get_attr(coupon_spec, \"coupon_frequency\"))\n if isinstance(coupon_frequency, dict):\n coupon_frequency = 
market_data_utils.period_from_dict(\n _get_attr(coupon_spec, \"coupon_frequency\"))\n\n businessday_rule = coupon_spec.businessday_rule\n # Business day roll convention and the end of month flag\n roll_convention, eom = market_data_utils.get_business_day_convention(\n businessday_rule)\n\n notional = tf.convert_to_tensor(\n _get_attr(coupon_spec, \"notional_amount\"),\n dtype=dtype,\n name=\"notional\")\n self._dtype = dtype or notional.dtype\n fixed_rate = tf.convert_to_tensor(_get_attr(coupon_spec, \"fixed_rate\"),\n dtype=self._dtype,\n name=\"fixed_rate\")\n\n daycount_fn = market_data_utils.get_daycount_fn(\n _get_attr(coupon_spec, \"daycount_convention\"), self._dtype)\n\n self._settlement_days = tf.convert_to_tensor(\n _get_attr(coupon_spec, \"settlement_days\"),\n dtype=tf.int32,\n name=\"settlement_days\")\n\n if schedule is not None:\n if isinstance(schedule, tf.Tensor):\n coupon_dates = dateslib.dates_from_tensor(schedule)\n else:\n coupon_dates = dateslib.convert_to_date_tensor(schedule)\n # Extract starting date for the cashflow\n self._start_date = coupon_dates[..., 0]\n elif schedule_fn is None:\n # TODO(b/160446193): Calendar is ignored and weekends only is used\n calendar = dateslib.create_holiday_calendar(\n weekend_mask=dateslib.WeekendMask.SATURDAY_SUNDAY)\n self._calendar = calendar\n coupon_dates = _generate_schedule(\n start_date=self._start_date,\n end_date=self._end_date,\n coupon_frequency=coupon_frequency,\n roll_convention=roll_convention,\n calendar=calendar,\n settlement_days=self._settlement_days,\n end_of_month=eom,\n first_coupon_date=self._first_coupon_date,\n penultimate_coupon_date=self._penultimate_coupon_date)\n # Extract starting date for the cashflow\n self._start_date = coupon_dates[..., 0]\n else:\n if first_coupon_date is not None:\n first_coupon_date = self._first_coupon_date.to_tensor()\n if penultimate_coupon_date is not None:\n penultimate_coupon_date = self._penultimate_coupon_date.to_tensor()\n coupon_dates = 
schedule_fn(\n start_date=self._start_date.to_tensor(),\n end_date=self._end_date.to_tensor(),\n coupon_frequency=coupon_frequency.quantity(),\n settlement_days=self._settlement_days,\n first_coupon_date=first_coupon_date,\n penultimate_coupon_date=penultimate_coupon_date)\n # Convert to DateTensor if the result comes from a tf.function\n coupon_dates = dateslib.convert_to_date_tensor(coupon_dates)\n\n self._batch_shape = tf.shape(coupon_dates.ordinal())[:-1]\n payment_dates = coupon_dates[..., 1:]\n\n daycount_fractions = daycount_fn(\n start_date=coupon_dates[..., :-1],\n end_date=coupon_dates[..., 1:])\n\n coupon_rate = tf.expand_dims(fixed_rate, axis=-1)\n\n self._num_cashflows = tf.shape(payment_dates.ordinal())[-1]\n self._payment_dates = payment_dates\n self._notional = notional\n self._daycount_fractions = daycount_fractions\n self._coupon_rate = coupon_rate\n self._fixed_rate = tf.convert_to_tensor(fixed_rate, dtype=self._dtype)\n self._daycount_fn = daycount_fn\n\n def daycount_fn(self) -> Callable[..., Any]:\n return self._daycount_fn\n\n @property\n def daycount_fractions(self) -> types.FloatTensor:\n return self._daycount_fractions\n\n @property\n def fixed_rate(self) -> types.FloatTensor:\n return self._fixed_rate\n\n @property\n def notional(self) -> types.FloatTensor:\n return self._notional\n\n @property\n def discount_curve_type(self) -> _CurveType:\n return self._discount_curve_type\n\n @property\n def batch_shape(self) -> types.StringTensor:\n return self._batch_shape\n\n @property\n def cashflow_dates(self) -> types.DateTensor:\n return self._payment_dates\n\n def cashflows(self,\n market: pmd.ProcessedMarketData,\n name: Optional[str] = None\n ) -> Tuple[types.DateTensor, types.FloatTensor]:\n \"\"\"Returns cashflows for the fixed leg.\n\n Args:\n market: An instance of `ProcessedMarketData`.\n name: Python str. 
The name to give to the ops created by this function.\n Default value: `None` which maps to 'cashflows'.\n\n Returns:\n A tuple of two `Tensor`s of shape `batch_shape + [num_cashflows]` and\n containing the dates and the corresponding cashflows price for each\n stream based on the input market data.\n \"\"\"\n name = name or (self._name + \"_cashflows\")\n with tf.name_scope(name):\n valuation_date = dateslib.convert_to_date_tensor(market.date)\n future_cashflows = tf.cast(self._payment_dates >= valuation_date,\n dtype=self._dtype)\n # self._notional is of shape [batch_shape], so broadcasting is needed\n notional = tf.expand_dims(self._notional, axis=-1)\n # Cashflow present values.\n cashflows = notional * (\n future_cashflows * self._daycount_fractions * self._coupon_rate)\n return self._payment_dates, cashflows\n\n def price(self,\n market: pmd.ProcessedMarketData,\n name: Optional[str] = None):\n \"\"\"Returns the present value of the stream on the valuation date.\n\n Args:\n market: An instance of `ProcessedMarketData`.\n name: Python str. 
The name to give to the ops created by this function.\n Default value: `None` which maps to 'price'.\n\n Returns:\n A `Tensor` of shape `batch_shape` containing the modeled price of each\n stream based on the input market data.\n \"\"\"\n name = name or (self._name + \"_price\")\n with tf.name_scope(name):\n discount_curve = get_discount_curve(\n self._discount_curve_type, market, self._mask)\n discount_factors = discount_curve.discount_factor(\n self._payment_dates)\n _, cashflows = self.cashflows(market)\n # Cashflow present values\n cashflow_pvs = (cashflows * discount_factors)\n return tf.math.reduce_sum(cashflow_pvs, axis=1)\n\n\nclass FloatingCashflowStream:\n \"\"\"Represents a batch of cashflows indexed to a floating rate.\"\"\"\n\n def __init__(self,\n coupon_spec: coupon_specs.FloatCouponSpecs,\n discount_curve_type: Union[_CurveType, List[_CurveType]],\n start_date: types.DateTensor = None,\n end_date: types.DateTensor = None,\n discount_curve_mask: types.IntTensor = None,\n rate_index_curves: Union[\n curve_types_lib.RateIndexCurve,\n List[curve_types_lib.RateIndexCurve]] = None,\n reference_mask: types.IntTensor = None,\n first_coupon_date: Optional[types.DateTensor] = None,\n penultimate_coupon_date: Optional[types.DateTensor] = None,\n schedule_fn: Optional[Callable[..., Any]] = None,\n schedule: Optional[types.DateTensor] = None,\n past_fixing: Optional[types.FloatTensor] = None,\n dtype: Optional[types.Dtype] = None,\n name: Optional[str] = None):\n \"\"\"Initializes a batch of floating cashflow streams.\n\n Args:\n coupon_spec: An instance of `FloatCouponSpecs` specifying the\n details of the coupon payment for the cashflow stream.\n discount_curve_type: An instance of `CurveType` or a list of those.\n If supplied as a list and `discount_curve_mask` is not supplied,\n the size of the list should be the same as the number of priced\n instruments. 
Defines discount curves for the instruments.\n start_date: A `DateTensor` of `batch_shape` specifying the starting dates\n of the accrual of the first coupon of the cashflow stream. The shape of\n the input correspond to the number of streams being created.\n Either this of `schedule` should be supplied.\n When passed as an integet `Tensor`, should be of shape\n `batch_shape + [3]` and contain `[year, month, day]` for each date.\n Default value: `None`\n end_date: A `DateTensor` of `batch_shape`specifying the end dates for\n accrual of the last coupon in each cashflow stream. The shape of the\n input should be the same as that of `start_date`.\n Either this of `schedule` should be supplied.\n When passed as an integet `Tensor`, should be of shape\n `batch_shape + [3]` and contain `[year, month, day]` for each date.\n Default value: `None`\n discount_curve_mask: An optional integer `Tensor` of values ranging from\n `0` to `len(discount_curve_type) - 1` and of shape `batch_shape`.\n Identifies a mapping between `discount_curve_type` list and the\n underlying instruments.\n Default value: `None`.\n rate_index_curves: An instance of `RateIndexCurve` or a list of those.\n If supplied as a list and `reference_mask` is not supplid,\n the size of the list should be the same as the number of priced\n instruments. Defines the index curves for each instrument. If not\n supplied, `coupon_spec.floating_rate_type` is used to identify the\n curves.\n Default value: `None`.\n reference_mask: An optional integer `Tensor` of values ranging from\n `0` to `len(rate_index_curves) - 1` and of shape `batch_shape`.\n Identifies a mapping between `rate_index_curves` list and the underlying\n instruments.\n Default value: `None`.\n first_coupon_date: An optional `DateTensor` specifying the payment dates\n of the first coupon of the cashflow stream. Use this input for cashflows\n with irregular first coupon. 
Should be of the same shape as\n `start_date`.\n When passed as an integet `Tensor`, should be of shape\n `batch_shape + [3]` and contain `[year, month, day]` for each date.\n Default value: None which implies regular first coupon.\n penultimate_coupon_date: An optional `DateTensor` specifying the payment\n dates of the penultimate (next to last) coupon of the cashflow\n stream. Use this input for cashflows with irregular last coupon.\n Should be of the same shape as `end_date`.\n When passed as an integet `Tensor`, should be of shape\n `batch_shape + [3]` and contain `[year, month, day]` for each date.\n Default value: None which implies regular last coupon.\n schedule_fn: A callable that accepts `start_date`, `end_date`,\n `coupon_frequency`, `settlement_days`, `first_coupon_date`, and\n `penultimate_coupon_date` as `Tensor`s and returns coupon payment\n days.\n Default value: `None`.\n schedule: A `DateTensor` of coupon payment dates including the start and\n end dates of the cashflows.\n Default value: `None`.\n past_fixing: An optional `Tensor` of shape compatible with\n `batch_shape + [1]`. Represents the fixings for the cashflows as\n observed at `market.date`.\n dtype: `tf.Dtype` of the input and output real `Tensor`s.\n Default value: None which maps to the default dtype inferred by\n TensorFlow.\n name: Python str. 
The name to give to the ops created by this class.\n Default value: `None` which maps to 'floating_cashflow_stream'.\n \"\"\"\n\n self._name = name or \"floating_cashflow_stream\"\n with tf.name_scope(self._name):\n curve_list = to_list(discount_curve_type)\n [\n self._discount_curve_type,\n self._mask\n ] = process_curve_types(curve_list, discount_curve_mask)\n self._first_coupon_date = None\n self._penultimate_coupon_date = None\n if schedule is None:\n if (start_date is None) or (end_date is None):\n raise ValueError(\"If `schedule` is not supplied both \"\n \"`start_date` and `end_date` should be supplied\")\n\n if schedule is None:\n if isinstance(start_date, tf.Tensor):\n self._start_date = dateslib.dates_from_tensor(\n start_date)\n else:\n self._start_date = dateslib.convert_to_date_tensor(\n start_date)\n if isinstance(start_date, tf.Tensor):\n self._end_date = dateslib.dates_from_tensor(\n end_date)\n else:\n self._end_date = dateslib.convert_to_date_tensor(\n end_date)\n self._first_coupon_date = first_coupon_date\n self._penultimate_coupon_date = penultimate_coupon_date\n if self._first_coupon_date is not None:\n if isinstance(start_date, tf.Tensor):\n self._first_coupon_date = dateslib.dates_from_tensor(\n first_coupon_date)\n else:\n self._first_coupon_date = dateslib.convert_to_date_tensor(\n first_coupon_date)\n if self._penultimate_coupon_date is not None:\n if isinstance(start_date, tf.Tensor):\n self._penultimate_coupon_date = dateslib.dates_from_tensor(\n penultimate_coupon_date)\n else:\n self._penultimate_coupon_date = dateslib.convert_to_date_tensor(\n penultimate_coupon_date)\n # Convert coupon and reset frequencies to PeriodTensor\n coupon_frequency = _get_attr(coupon_spec, \"coupon_frequency\")\n # Update coupon frequency\n if isinstance(coupon_frequency, period_pb2.Period):\n coupon_frequency = market_data_utils.get_period(\n _get_attr(coupon_spec, \"coupon_frequency\"))\n if isinstance(coupon_frequency, (list, tuple)):\n coupon_frequency 
= market_data_utils.period_from_list(\n *_get_attr(coupon_spec, \"coupon_frequency\"))\n if isinstance(coupon_frequency, dict):\n coupon_frequency = market_data_utils.period_from_dict(\n _get_attr(coupon_spec, \"coupon_frequency\"))\n # Update reset frequency\n reset_frequency = _get_attr(coupon_spec, \"reset_frequency\")\n if isinstance(reset_frequency, period_pb2.Period):\n reset_frequency = market_data_utils.get_period(\n _get_attr(coupon_spec, \"reset_frequency\"))\n if isinstance(reset_frequency, (list, tuple)):\n reset_frequency = market_data_utils.period_from_list(\n *_get_attr(coupon_spec, \"reset_frequency\"))\n if isinstance(reset_frequency, dict):\n reset_frequency = market_data_utils.period_from_dict(\n _get_attr(coupon_spec, \"reset_frequency\"))\n self._reset_frequency = reset_frequency\n businessday_rule = _get_attr(coupon_spec, \"businessday_rule\")\n roll_convention, eom = market_data_utils.get_business_day_convention(\n businessday_rule)\n notional = tf.convert_to_tensor(\n _get_attr(coupon_spec, \"notional_amount\"),\n dtype=dtype,\n name=\"notional\")\n self._dtype = dtype or notional.dtype\n\n daycount_convention = _get_attr(coupon_spec, \"daycount_convention\")\n\n daycount_fn = market_data_utils.get_daycount_fn(\n _get_attr(coupon_spec, \"daycount_convention\"), self._dtype)\n self._daycount_convention = daycount_convention\n\n self._settlement_days = tf.convert_to_tensor(\n _get_attr(coupon_spec, \"settlement_days\"),\n dtype=tf.int32,\n name=\"settlement_days\")\n spread = tf.convert_to_tensor(_get_attr(coupon_spec, \"spread\"),\n dtype=self._dtype,\n name=\"spread\")\n if schedule is not None:\n if isinstance(schedule, tf.Tensor):\n coupon_dates = dateslib.dates_from_tensor(schedule)\n else:\n coupon_dates = dateslib.convert_to_date_tensor(schedule)\n # Extract starting date for the cashflow\n self._start_date = coupon_dates[..., 0]\n elif schedule_fn is None:\n # TODO(b/160446193): Calendar is ignored and weekends only is used\n calendar 
= dateslib.create_holiday_calendar(\n weekend_mask=dateslib.WeekendMask.SATURDAY_SUNDAY)\n self._calendar = calendar\n coupon_dates = _generate_schedule(\n start_date=self._start_date,\n end_date=self._end_date,\n coupon_frequency=coupon_frequency,\n roll_convention=roll_convention,\n calendar=calendar,\n settlement_days=self._settlement_days,\n end_of_month=eom,\n first_coupon_date=self._first_coupon_date,\n penultimate_coupon_date=self._penultimate_coupon_date)\n # Extract starting date for the cashflow\n self._start_date = coupon_dates[..., 0]\n else:\n if first_coupon_date is not None:\n first_coupon_date = self._first_coupon_date.to_tensor()\n if penultimate_coupon_date is not None:\n penultimate_coupon_date = self._penultimate_coupon_date.to_tensor()\n coupon_dates = schedule_fn(\n start_date=self._start_date.to_tensor(),\n end_date=self._end_date.to_tensor(),\n coupon_frequency=coupon_frequency.quantity(),\n settlement_days=self._settlement_days,\n first_coupon_date=first_coupon_date,\n penultimate_coupon_date=penultimate_coupon_date)\n # Convert to DateTensor if the result comes from a tf.function\n coupon_dates = dateslib.convert_to_date_tensor(coupon_dates)\n # Extract batch shape\n self._batch_shape = tf.shape(coupon_dates.ordinal())[:-1]\n\n accrual_start_dates = coupon_dates[..., :-1]\n\n coupon_start_dates = coupon_dates[..., :-1]\n coupon_end_dates = coupon_dates[..., 1:]\n\n accrual_end_dates = accrual_start_dates + reset_frequency.expand_dims(\n axis=-1)\n\n # Adjust for irregular coupons\n accrual_end_dates = dateslib.DateTensor.concat(\n [coupon_end_dates[..., :1],\n accrual_end_dates[..., 1:-1],\n coupon_end_dates[..., -1:]], axis=-1)\n daycount_fractions = daycount_fn(\n start_date=coupon_start_dates,\n end_date=coupon_end_dates)\n\n self._num_cashflows = tf.shape(daycount_fractions)[-1]\n self._coupon_start_dates = coupon_start_dates\n self._coupon_end_dates = coupon_end_dates\n self._accrual_start_date = accrual_start_dates\n 
self._accrual_end_date = accrual_end_dates\n self._notional = notional\n self._daycount_fractions = daycount_fractions\n self._spread = spread\n self._currency = _get_attr(coupon_spec, \"currency\")\n self._daycount_fn = daycount_fn\n # Construct the reference curve object\n # Extract all rate_curves\n self._floating_rate_type = to_list(\n _get_attr(coupon_spec, \"floating_rate_type\"))\n self._currency = to_list(self._currency)\n if rate_index_curves is None:\n rate_index_curves = []\n for currency, floating_rate_type in zip(self._currency,\n self._floating_rate_type):\n rate_index_curves.append(curve_types_lib.RateIndexCurve(\n currency=currency, index=floating_rate_type))\n [\n self._reference_curve_type,\n self._reference_mask\n ] = process_curve_types(rate_index_curves, reference_mask)\n self._past_fixing = past_fixing\n\n def daycount_fn(self) -> Callable[..., Any]:\n return self._daycount_fn\n\n @property\n def notional(self) -> types.FloatTensor:\n return self._notional\n\n @property\n def discount_curve_type(self) -> _CurveType:\n return self._discount_curve_type\n\n @property\n def reference_curve_type(self) -> _CurveType:\n return self._reference_curve_type\n\n @property\n def batch_shape(self) -> types.StringTensor:\n return self._batch_shape\n\n @property\n def daycount_fractions(self) -> types.FloatTensor:\n return self._daycount_fractions\n\n @property\n def cashflow_dates(self) -> types.DateTensor:\n return self._coupon_end_dates\n\n @property\n def coupon_start_dates(self) -> types.DateTensor:\n return self._coupon_start_dates\n\n @property\n def coupon_end_dates(self) -> types.DateTensor:\n return self._coupon_end_dates\n\n def forward_rates(self,\n market: pmd.ProcessedMarketData,\n past_fixing: Optional[types.FloatTensor] = None,\n name: Optional[str] = None\n ) -> Tuple[types.DateTensor, types.FloatTensor]:\n \"\"\"Returns forward rates for the floating leg.\n\n Args:\n market: An instance of `ProcessedMarketData`.\n past_fixing: An optional 
`Tensor` of shape compatible with\n `batch_shape + [1]`. Represents the fixings for the cashflows as\n observed at `market.date`.\n name: Python str. The name to give to the ops created by this function.\n Default value: `None` which maps to 'forward_rates'.\n\n Returns:\n A tuple of two `Tensor`s of shape `batch_shape + [num_cashflows]`\n containing the dates and the corresponding forward rates for each stream\n based on the input market data.\n \"\"\"\n name = name or (self._name + \"_forward_rates\")\n with tf.name_scope(name):\n reference_curve = get_discount_curve(\n self._reference_curve_type, market, self._reference_mask)\n valuation_date = dateslib.convert_to_date_tensor(market.date)\n\n # Previous fixing date\n coupon_start_date_ord = self._coupon_start_dates.ordinal()\n coupon_end_date_ord = self._coupon_end_dates.ordinal()\n valuation_date_ord = valuation_date.ordinal()\n batch_shape = tf.shape(coupon_start_date_ord)[:-1]\n # Broadcast valuation date batch shape for tf.searchsorted\n valuation_date_ord += tf.expand_dims(\n tf.zeros(batch_shape, dtype=tf.int32), axis=-1)\n ind = tf.maximum(tf.searchsorted(coupon_start_date_ord,\n valuation_date_ord) - 1, 0)\n # Fixings are assumed to be the same as coupon start dates\n # TODO(b/177047910): add fixing settlement dates.\n # Shape `batch_shape + [1]`\n fixing_dates_ord = tf.gather(\n coupon_start_date_ord, ind,\n batch_dims=len(coupon_start_date_ord.shape) - 1)\n fixing_end_dates_ord = tf.gather(\n coupon_end_date_ord, ind,\n batch_dims=len(coupon_start_date_ord.shape) - 1)\n fixing_dates = dateslib.dates_from_ordinals(fixing_dates_ord)\n fixing_end_dates = dateslib.dates_from_ordinals(fixing_end_dates_ord)\n # Get fixings. 
Shape batch_shape + [1]\n if past_fixing is None:\n past_fixing = _get_fixings(\n fixing_dates,\n fixing_end_dates,\n self._reference_curve_type,\n self._reference_mask,\n market)\n else:\n past_fixing = tf.convert_to_tensor(past_fixing, dtype=self._dtype,\n name=\"past_fixing\")\n forward_rates = reference_curve.forward_rate(\n self._accrual_start_date,\n self._accrual_end_date,\n day_count_fraction=self._daycount_fractions)\n # Shape batch_shape + [num_cashflows]\n forward_rates = tf.where(self._daycount_fractions > 0., forward_rates,\n tf.zeros_like(forward_rates))\n # If coupon end date is before the valuation date, the payment is in the\n # past. If valuation date is between coupon start date and coupon end\n # date, then the rate has been fixed but not paid. Otherwise the rate is\n # not fixed and should be read from the curve.\n # Shape batch_shape + [num_cashflows]\n forward_rates = tf.where(\n self._coupon_end_dates < valuation_date,\n tf.constant(0, dtype=self._dtype),\n tf.where(self._coupon_start_dates >= valuation_date,\n forward_rates, past_fixing))\n return self._coupon_end_dates, forward_rates\n\n def cashflows(self,\n market: pmd.ProcessedMarketData,\n past_fixing: Optional[types.FloatTensor] = None,\n name: Optional[str] = None\n ) -> Tuple[types.DateTensor, types.FloatTensor]:\n \"\"\"Returns cashflows for the floating leg.\n\n Args:\n market: An instance of `ProcessedMarketData`.\n past_fixing: An optional `Tensor` of shape compatible with\n `batch_shape + [1]`. Represents the fixings for the cashflows as\n observed at `market.date`.\n name: Python str. 
The name to give to the ops created by this function.\n Default value: `None` which maps to 'cashflows'.\n\n Returns:\n A tuple of two `Tensor`s of shape `batch_shape + [num_cashflows]` and\n containing the dates and the corresponding cashflows price for each\n stream based on the input market data.\n \"\"\"\n name = name or (self._name + \"_cashflows\")\n with tf.name_scope(name):\n _, forward_rates = self.forward_rates(market, past_fixing=past_fixing)\n\n coupon_rate = forward_rates + tf.expand_dims(\n self._spread, axis=-1)\n # self._notion is of shape [batch_shape], so broadcasting is needed\n notional = tf.expand_dims(self._notional, axis=-1)\n\n cashflows = notional * (\n self._daycount_fractions * coupon_rate)\n return self._coupon_end_dates, cashflows\n\n def price(self,\n market: pmd.ProcessedMarketData,\n name: Optional[str] = None) -> types.FloatTensor:\n \"\"\"Returns the present value of the stream on the valuation date.\n\n Args:\n market: An instance of `ProcessedMarketData`.\n name: Python str. 
The name to give to the ops created by this function.\n Default value: `None` which maps to 'price'.\n\n Returns:\n A `Tensor` of shape `batch_shape` containing the modeled price of each\n stream based on the input market data.\n \"\"\"\n\n name = name or (self._name + \"_price\")\n with tf.name_scope(name):\n discount_curve = get_discount_curve(\n self._discount_curve_type, market, self._mask)\n discount_factors = discount_curve.discount_factor(self._coupon_end_dates)\n _, cashflows = self.cashflows(market, past_fixing=self._past_fixing)\n # Cashflows present values\n cashflow_pvs = cashflows * discount_factors\n return tf.math.reduce_sum(cashflow_pvs, axis=1)\n\n\ndef _generate_schedule(\n start_date: dateslib.DateTensor,\n end_date: dateslib.DateTensor,\n coupon_frequency: dateslib.PeriodTensor,\n calendar: dateslib.HolidayCalendar,\n roll_convention: dateslib.BusinessDayConvention,\n settlement_days: tf.Tensor,\n end_of_month: bool = False,\n first_coupon_date: Optional[dateslib.DateTensor] = None,\n penultimate_coupon_date: Optional[dateslib.DateTensor] = None) -> tf.Tensor:\n \"\"\"Method to generate coupon dates.\n\n Args:\n start_date: Starting dates of schedule.\n end_date: End dates of the schedule.\n coupon_frequency: A `PeriodTensor` specifying the frequency of coupon\n payments.\n calendar: calendar: An instance of `BankHolidays`.\n roll_convention: Business day roll convention of the schedule.\n settlement_days: An integer `Tensor` with the shape compatible with\n `start_date` and `end_date` specifying the number of settlement days.\n end_of_month: Python `bool`. If `True`, shifts all dates in schedule to\n the ends of corresponding months, if `start_date` or `end_date` (\n depending on `backward`) is at the end of a month. 
The shift is applied\n before applying `roll_convention`.\n first_coupon_date: First day of the irregular coupon, if any.\n penultimate_coupon_date: Penultimate day of the coupon, if any.\n\n Returns:\n A `DateTensor` containing the generated date schedule of shape\n `batch_shape + [max_num_coupon_days]`, where `max_num_coupon_days` is the\n number of coupon days for the longest living swap in the batch. The coupon\n days for the rest of the swaps are padded with their final coupon day.\n \"\"\"\n if first_coupon_date is not None and penultimate_coupon_date is not None:\n raise ValueError(\"Only first or last coupon dates can be specified \"\n \" for an irregular coupon.\")\n start_date = first_coupon_date or start_date\n # Adjust with settlement days\n start_date = calendar.add_business_days(\n start_date, settlement_days,\n roll_convention=roll_convention)\n if penultimate_coupon_date is None:\n backward = False\n else:\n backward = True\n end_date = end_date or penultimate_coupon_date\n # Adjust with settlement days\n end_date = calendar.add_business_days(\n end_date, settlement_days,\n roll_convention=roll_convention)\n coupon_dates = dateslib.PeriodicSchedule(\n start_date=start_date,\n end_date=end_date,\n tenor=coupon_frequency,\n roll_convention=roll_convention,\n backward=backward,\n end_of_month=end_of_month).dates()\n # Add the regular coupons\n coupon_dates = dateslib.DateTensor.concat(\n [start_date.expand_dims(-1),\n coupon_dates,\n end_date.expand_dims(-1)], axis=-1)\n return coupon_dates\n\n\ndef get_discount_curve(\n discount_curve_types: List[Union[curve_types_lib.RiskFreeCurve,\n curve_types_lib.RateIndexCurve]],\n market: pmd.ProcessedMarketData,\n mask: List[int]) -> rate_curve.RateCurve:\n \"\"\"Builds a batched discount curve.\n\n Given a list of discount curve an integer mask, creates a discount curve\n object to compute discount factors against the list of discount curves.\n\n #### Example\n ```none\n curve_types = [RiskFreeCurve(\"USD\"), 
RiskFreeCurve(\"AUD\")]\n # A mask to price a batch of 7 instruments with the corresponding discount\n # curves [\"USD\", \"AUD\", \"AUD\", \"AUD\" \"USD\", \"USD\", \"AUD\"].\n mask = [0, 1, 1, 1, 0, 0, 1]\n market = MarketDataDict(...)\n get_discount_curve(curve_types, market, mask)\n # Returns a RateCurve object that can compute a discount factors for a\n # batch of 7 dates.\n ```\n\n Args:\n discount_curve_types: A list of curve types.\n market: an instance of the processed market data.\n mask: An integer mask.\n\n Returns:\n An instance of `RateCurve`.\n \"\"\"\n discount_curves = [market.yield_curve(curve_type)\n for curve_type in discount_curve_types]\n discounts = []\n dates = []\n interpolation_method = None\n interpolate_rates = None\n for curve in discount_curves:\n discount, date = curve.discount_factors_and_dates()\n discounts.append(discount)\n dates.append(date)\n interpolation_method = curve.interpolation_method\n interpolate_rates = curve.interpolate_rates\n\n all_discounts = tf.stack(pad.pad_tensors(discounts), axis=0)\n all_dates = pad.pad_date_tensors(dates)\n all_dates = dateslib.DateTensor.stack(dates, axis=0)\n prepare_discounts = tf.gather(all_discounts, mask)\n prepare_dates = dateslib.dates_from_ordinals(\n tf.gather(all_dates.ordinal(), mask))\n # All curves are assumed to have the same interpolation method\n # TODO(b/168411153): Extend to the case with multiple curve configs.\n discount_curve = rate_curve.RateCurve(\n prepare_dates, prepare_discounts, market.date,\n interpolator=interpolation_method,\n interpolate_rates=interpolate_rates)\n return discount_curve\n\n\ndef _get_fixings(start_dates,\n end_dates,\n reference_curve_types,\n reference_mask,\n market):\n \"\"\"Computes fixings for a list of reference curves.\"\"\"\n num_curves = len(reference_curve_types)\n if num_curves > 1:\n # For each curve get corresponding cashflow indices\n split_indices = [tf.squeeze(tf.where(tf.equal(reference_mask, i)), -1)\n for i in 
range(num_curves)]\n else:\n split_indices = [0]\n fixings = []\n start_dates_ordinal = start_dates.ordinal()\n end_dates_ordinal = end_dates.ordinal()\n for idx, reference_curve_type in zip(split_indices, reference_curve_types):\n if num_curves > 1:\n # Get all dates corresponding to the reference curve\n start_date = dateslib.dates_from_ordinals(\n tf.gather(start_dates_ordinal, idx))\n end_date = dateslib.dates_from_ordinals(\n tf.gather(end_dates_ordinal, idx))\n else:\n start_date = start_dates\n end_date = end_dates\n fixing, fixing_daycount = market.fixings(start_date, reference_curve_type)\n if fixing_daycount is not None:\n fixing_daycount = market_data_utils.get_daycount_fn(\n fixing_daycount, dtype=market.dtype)\n year_fraction = fixing_daycount(start_date=start_date, end_date=end_date)\n else:\n year_fraction = 0.0\n fixings.append(\n fixing * year_fraction)\n fixings = pad.pad_tensors(fixings)\n all_indices = tf.concat(split_indices, axis=0)\n all_fixings = tf.concat(fixings, axis=0)\n if num_curves > 1:\n return tf.gather(all_fixings, tf.argsort(all_indices))\n else:\n return all_fixings\n\n\ndef process_curve_types(\n curve_types: List[Union[curve_types_lib.RiskFreeCurve,\n curve_types_lib.RateIndexCurve]],\n mask=None\n ) -> Tuple[\n List[Union[curve_types_lib.RiskFreeCurve,\n curve_types_lib.RateIndexCurve]],\n List[int]]:\n \"\"\"Extracts unique curves and computes an integer mask.\n\n #### Example\n ```python\n curve_types = [RiskFreeCurve(\"USD\"), RiskFreeCurve(\"AUD\"),\n RiskFreeCurve(\"USD\")]\n process_curve_types(curve_types)\n # Returns [RiskFreeCurve(\"AUD\"), RiskFreeCurve(\"USD\")], [1, 0, 1]\n ```\n Args:\n curve_types: A list of either `RiskFreeCurve` or `RateIndexCurve`.\n mask: An optional integer mask for the sorted curve type sequence. 
If\n supplied, the function returns does not do anything and returns\n `(curve_types, mask)`.\n\n Returns:\n A Tuple of `(curve_list, mask)` where `curve_list` is a list of unique\n curves in `curve_types` and `mask` is a list of integers which is the\n mask for `curve_types`.\n \"\"\"\n def _get_signature(curve):\n \"\"\"Converts curve infromation to a string.\"\"\"\n if isinstance(curve, curve_types_lib.RiskFreeCurve):\n return curve.currency.value\n elif isinstance(curve, curve_types_lib.RateIndexCurve):\n return (curve.currency.value + \"_\" + curve.index.type.value\n + \"_\" + \"_\".join(curve.index.source)\n + \"_\" + \"_\".join(curve.index.name))\n else:\n raise ValueError(f\"{type(curve)} is not supported.\")\n curve_list = to_list(curve_types)\n if mask is not None:\n return curve_list, mask\n curve_hash = [_get_signature(curve_type) for curve_type in curve_list]\n hash_discount_map = {\n _get_signature(curve_type): curve_type for curve_type in curve_list}\n mask, mask_map, num_unique_discounts = create_mask(curve_hash)\n discount_curve_types = [\n hash_discount_map[mask_map[i]]\n for i in range(num_unique_discounts)]\n return discount_curve_types, mask\n\n\ndef create_mask(x):\n \"\"\"Given a list of object creates integer mask for unique values in the list.\n\n Args:\n x: 1-d numpy array.\n\n Returns:\n A tuple of three objects:\n * A list of integers that is the mask for `x`,\n * A dictionary map between entries of `x` and the list\n * The number of unique elements.\n \"\"\"\n # For example, create_mask([\"USD\", \"AUD\", \"USD\"]) returns\n # a list [1, 0, 1], a map {0: \"AUD\", 1: \"USD\"} and the number of unique\n # elements which is 2.\n # Note that elements of `x` are being sorted\n unique = np.unique(x)\n num_unique_elems = len(unique)\n keys = range(num_unique_elems)\n d = dict(zip(unique, keys))\n mask_map = dict(zip(keys, unique))\n return [d[el] for el in x], mask_map, num_unique_elems\n\n\ndef to_list(x):\n \"\"\"Converts input to a list if 
necessary.\"\"\"\n if isinstance(x, (list, tuple)):\n return x\n else:\n return [x]\n\n\ndef _get_attr(obj, key):\n if isinstance(obj, dict):\n return obj[key]\n else:\n return obj.__getattribute__(key)\n\n\n__all__ = [\"FixedCashflowStream\", \"FloatingCashflowStream\"]\n"
] |
[
[
"tensorflow.compat.v2.math.exp",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.transpose",
"tensorflow.compat.v2.math.is_nan",
"numpy.sqrt",
"tensorflow.compat.v2.broadcast_to",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.nn.relu",
"numpy.array"
],
[
"tensorflow.compat.v2.math.reduce_max",
"numpy.log",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.concat",
"tensorflow.compat.v2.range",
"tensorflow.compat.v2.ones",
"numpy.diff",
"numpy.exp",
"numpy.array",
"tensorflow.compat.v2.constant",
"numpy.sum"
],
[
"numpy.array",
"tensorflow.compat.v2.test.main",
"numpy.testing.assert_allclose"
],
[
"tensorflow.python.util.all_util.remove_undocumented"
],
[
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.argsort",
"tensorflow.compat.v2.searchsorted",
"tensorflow.compat.v2.equal",
"numpy.unique",
"tensorflow.compat.v2.concat",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.where",
"tensorflow.compat.v2.math.reduce_sum",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.expand_dims",
"tensorflow.compat.v2.gather",
"tensorflow.compat.v2.zeros",
"tensorflow.compat.v2.constant"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
bupt-nlp/model-getting-started
|
[
"d3afee2eb91b90b5910fd0473a67a9b3343298b2"
] |
[
"classifier.py"
] |
[
"from __future__ import annotations\nimport datetime\nimport os\nimport pickle\n\nimport numpy as np\nimport torch\n\nfrom src.data_process import THCNewsDataProcessor, DataIterator, THCNewsFeaturesExtractor\nfrom src.models.base_model import SequenceClassificationModel, BiLSTMSequenceClassificationModel\nfrom src.schema import Config\nfrom train import (train, test)\n\n\ndef create_model_config() -> Config:\n config = Config()\n # set data path, contains: train and test file\n root = os.path.join(os.path.abspath('.'), 'data')\n config.data_dir = os.path.join(root, 'THCNews')\n config.language = 'zh'\n # pretrained model path, contains:\n # 1. pretrained model's binary file\n # 2. vocab\n pretrained_path = os.path.join(os.path.join(root, 'pretrained'), config.language)\n config.vocab_file = os.path.join(pretrained_path, 'vocab.pkl')\n config.pretrained_model_name = 'embedding_SougouNews.npz'\n config.pretrained_file = os.path.join(pretrained_path, config.pretrained_model_name)\n # save log with time here\n config.log_dir = os.path.join(config.data_dir, 'log')\n if not os.path.exists(config.log_dir):\n os.mkdir(config.log_dir)\n # save model after training here\n config.output_dir = os.path.join(config.data_dir, 'save_dict')\n if not os.path.exists(config.output_dir):\n os.mkdir(config.output_dir)\n # load pretrained model\n config.pretrained_model = torch.tensor(\n np.load(config.pretrained_file)[\"embeddings\"].astype('float32'))\n config.embedding_dim = config.pretrained_model.size()[1]\n config.hidden_size = 128\n config.num_layers = 2\n config.dropout = 0.5\n config.num_labels = 10\n config.max_seq_length = 40\n config.num_epochs = 8\n config.class_list = []\n with open(os.path.join(config.data_dir, 'class.txt')) as f:\n lines = f.readlines()\n for line in lines:\n config.class_list.append(line.strip())\n return config\n\n\ndef create_sequence_classification_model(config: Config) -> SequenceClassificationModel:\n model = 
BiLSTMSequenceClassificationModel(config)\n return model\n\n\ndef get_data_iterator(config: Config) -> (DataIterator, DataIterator, DataIterator):\n data_processor = THCNewsDataProcessor()\n train_file = os.path.join(config.data_dir, 'train.txt')\n train_examples = data_processor.get_examples(train_file)\n dev_file = os.path.join(config.data_dir, 'dev.txt')\n dev_examples = data_processor.get_examples(dev_file)\n test_file = os.path.join(config.data_dir, 'test.txt')\n test_examples = data_processor.get_examples(test_file)\n print(f'Trainset Length: {len(train_examples)}, Example: {train_examples[0]}')\n print(f'Dev Length: {len(dev_examples)}, Example: {dev_examples[0]}')\n print(f'Testset Length: {len(test_examples)}, Example: {test_examples[0]}')\n\n vocab = pickle.load(open(config.vocab_file, 'rb'))\n train_iterator = THCNewsFeaturesExtractor(vocab, train_examples).get_data_iterator(\n batch_size=config.train_batch_size, max_len=config.max_seq_length, do_test=False)\n dev_iterator = THCNewsFeaturesExtractor(vocab, dev_examples).get_data_iterator(\n batch_size=config.eval_batch_size, max_len=config.max_seq_length, do_test=False)\n test_iterator = THCNewsFeaturesExtractor(vocab, test_examples).get_data_iterator(\n batch_size=config.predict_batch_size, max_len=config.max_seq_length, do_test=True)\n return train_iterator, dev_iterator, test_iterator\n\n\nconfig = create_model_config()\n# config = config.parse_args(known_only=True)\n# 0. Load vocab\nvocab = pickle.load(open(config.vocab_file, 'rb'))\nconfig.n_vocab = len(vocab)\n# 1. load data iterator\ntrain_iterator, dev_iterator, test_iterator = get_data_iterator(config)\nmodel = create_sequence_classification_model(config)\nprint(model)\nmodel = model.to(config.device)\n# train(config, model, train_iterator, dev_iterator, test_iterator)\ntest(model, test_iterator, config)\n"
] |
[
[
"numpy.load"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
w6688j/bert-utils
|
[
"a066214e9cbd3ff5cc905f94c7354189d986a1fd"
] |
[
"args.py"
] |
[
"import os\nimport tensorflow as tf\n\ntf.logging.set_verbosity(tf.logging.INFO)\n\nfile_path = os.path.dirname(__file__)\n\nmodel_dir = os.path.join(file_path, 'chinese_L-12_H-768_A-12/')\nconfig_name = os.path.join(model_dir, 'bert_config.json')\nckpt_name = os.path.join(model_dir, 'bert_model.ckpt')\noutput_dir = os.path.join(model_dir, '../tmp/result/')\nvocab_file = os.path.join(model_dir, 'vocab.txt')\ndata_dir = os.path.join(model_dir, '../data/')\n\nnum_train_epochs = 10\nbatch_size = 128\nlearning_rate = 0.00005\n\n# gpu使用率\ngpu_memory_fraction = 0.8\n\n# 默认取倒数第二层的输出值作为句向量\nlayer_indexes = [-2]\n\n# 序列的最大程度,单文本建议把该值调小\nmax_seq_len = 32\n"
] |
[
[
"tensorflow.logging.set_verbosity"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
JackCaster/pytorch-lightning
|
[
"c94c0a2b1ee6b444ab1ecf58059e922229d44436"
] |
[
"tests/callbacks/test_model_checkpoint.py"
] |
[
"import os\nimport pickle\nimport platform\nimport re\nfrom pathlib import Path\n\nimport cloudpickle\nimport pytest\nimport tests.base.develop_utils as tutils\nimport torch\nfrom pytorch_lightning import Trainer, seed_everything\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nfrom pytorch_lightning.loggers import TensorBoardLogger\nfrom tests.base import EvalModelTemplate\n\n\[email protected](\"save_top_k\", [-1, 0, 1, 2])\ndef test_model_checkpoint_with_non_string_input(tmpdir, save_top_k):\n \"\"\"Test that None in checkpoint callback is valid and that ckpt_path is set correctly\"\"\"\n tutils.reset_seed()\n model = EvalModelTemplate()\n\n checkpoint = ModelCheckpoint(filepath=None, save_top_k=save_top_k)\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n checkpoint_callback=checkpoint,\n overfit_batches=0.20,\n max_epochs=2,\n )\n trainer.fit(model)\n assert (\n checkpoint.dirpath == tmpdir / trainer.logger.name / \"version_0\" / \"checkpoints\"\n )\n\n\[email protected](\n \"logger_version,expected\",\n [(None, \"version_0\"), (1, \"version_1\"), (\"awesome\", \"awesome\")],\n)\ndef test_model_checkpoint_path(tmpdir, logger_version, expected):\n \"\"\"Test that \"version_\" prefix is only added when logger's version is an integer\"\"\"\n tutils.reset_seed()\n model = EvalModelTemplate()\n logger = TensorBoardLogger(str(tmpdir), version=logger_version)\n\n trainer = Trainer(\n default_root_dir=tmpdir, overfit_batches=0.2, max_epochs=2, logger=logger\n )\n trainer.fit(model)\n\n ckpt_version = Path(trainer.checkpoint_callback.dirpath).parent.name\n assert ckpt_version == expected\n\n\ndef test_pickling(tmpdir):\n ckpt = ModelCheckpoint(tmpdir)\n\n ckpt_pickled = pickle.dumps(ckpt)\n ckpt_loaded = pickle.loads(ckpt_pickled)\n assert vars(ckpt) == vars(ckpt_loaded)\n\n ckpt_pickled = cloudpickle.dumps(ckpt)\n ckpt_loaded = cloudpickle.loads(ckpt_pickled)\n assert vars(ckpt) == vars(ckpt_loaded)\n\n\nclass 
ModelCheckpointTestInvocations(ModelCheckpoint):\n # this class has to be defined outside the test function, otherwise we get pickle error\n # due to the way ddp process is launched\n\n def __init__(self, expected_count, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.count = 0\n self.expected_count = expected_count\n\n def _save_model(self, filepath, trainer, pl_module):\n # make sure we don't save twice\n assert not os.path.isfile(filepath)\n self.count += 1\n super()._save_model(filepath, trainer, pl_module)\n\n def on_train_end(self, trainer, pl_module):\n super().on_train_end(trainer, pl_module)\n # on rank 0 we expect the saved files and on all others no saves\n assert (trainer.global_rank == 0 and self.count == self.expected_count) or (\n trainer.global_rank > 0 and self.count == 0\n )\n\n\[email protected](\n platform.system() == \"Windows\",\n reason=\"Distributed training is not supported on Windows\",\n)\ndef test_model_checkpoint_no_extraneous_invocations(tmpdir):\n \"\"\"Test to ensure that the model callback saves the checkpoints only once in distributed mode.\"\"\"\n model = EvalModelTemplate()\n num_epochs = 4\n model_checkpoint = ModelCheckpointTestInvocations(\n expected_count=num_epochs, save_top_k=-1\n )\n trainer = Trainer(\n distributed_backend=\"ddp_cpu\",\n num_processes=2,\n default_root_dir=tmpdir,\n early_stop_callback=False,\n checkpoint_callback=model_checkpoint,\n max_epochs=num_epochs,\n )\n result = trainer.fit(model)\n assert 1 == result\n\n\ndef test_model_checkpoint_format_checkpoint_name(tmpdir):\n # empty filename:\n ckpt_name = ModelCheckpoint._format_checkpoint_name('', 3, {})\n assert ckpt_name == 'epoch=3'\n ckpt_name = ModelCheckpoint._format_checkpoint_name(None, 3, {}, prefix='test')\n assert ckpt_name == 'test-epoch=3'\n # no groups case:\n ckpt_name = ModelCheckpoint._format_checkpoint_name('ckpt', 3, {}, prefix='test')\n assert ckpt_name == 'test-ckpt'\n # no prefix\n ckpt_name = 
ModelCheckpoint._format_checkpoint_name('{epoch:03d}-{acc}', 3, {'acc': 0.03})\n assert ckpt_name == 'epoch=003-acc=0.03'\n # prefix\n char_org = ModelCheckpoint.CHECKPOINT_JOIN_CHAR\n ModelCheckpoint.CHECKPOINT_JOIN_CHAR = '@'\n ckpt_name = ModelCheckpoint._format_checkpoint_name('{epoch},{acc:.5f}', 3, {'acc': 0.03}, prefix='test')\n assert ckpt_name == 'test@epoch=3,acc=0.03000'\n ModelCheckpoint.CHECKPOINT_JOIN_CHAR = char_org\n # no filepath set\n ckpt_name = ModelCheckpoint(filepath=None).format_checkpoint_name(3, {})\n assert ckpt_name == 'epoch=3.ckpt'\n ckpt_name = ModelCheckpoint(filepath='').format_checkpoint_name(5, {})\n assert ckpt_name == 'epoch=5.ckpt'\n # CWD\n ckpt_name = ModelCheckpoint(filepath='.').format_checkpoint_name(3, {})\n assert Path(ckpt_name) == Path('.') / 'epoch=3.ckpt'\n # dir does not exist so it is used as filename\n filepath = tmpdir / 'dir'\n ckpt_name = ModelCheckpoint(filepath=filepath, prefix='test').format_checkpoint_name(3, {})\n assert ckpt_name == tmpdir / 'test-dir.ckpt'\n # now, dir exists\n os.mkdir(filepath)\n ckpt_name = ModelCheckpoint(filepath=filepath, prefix='test').format_checkpoint_name(3, {})\n assert ckpt_name == filepath / 'test-epoch=3.ckpt'\n # with ver\n ckpt_name = ModelCheckpoint(filepath=tmpdir / 'name', prefix='test').format_checkpoint_name(3, {}, ver=3)\n assert ckpt_name == tmpdir / 'test-name-v3.ckpt'\n\n\ndef test_model_checkpoint_save_last(tmpdir):\n \"\"\"Tests that save_last produces only one last checkpoint.\"\"\"\n model = EvalModelTemplate()\n epochs = 3\n ModelCheckpoint.CHECKPOINT_NAME_LAST = 'last-{epoch}'\n model_checkpoint = ModelCheckpoint(filepath=tmpdir, save_top_k=-1, save_last=True)\n trainer = Trainer(\n default_root_dir=tmpdir,\n early_stop_callback=False,\n checkpoint_callback=model_checkpoint,\n max_epochs=epochs,\n )\n trainer.fit(model)\n last_filename = model_checkpoint._format_checkpoint_name(ModelCheckpoint.CHECKPOINT_NAME_LAST, epochs - 1, {})\n last_filename = 
last_filename + '.ckpt'\n assert str(tmpdir / last_filename) == model_checkpoint.last_model_path\n assert set(os.listdir(tmpdir)) == set(\n [f'epoch={i}.ckpt' for i in range(epochs)] + [last_filename, 'lightning_logs']\n )\n ModelCheckpoint.CHECKPOINT_NAME_LAST = 'last'\n\n\ndef test_model_checkpoint_save_last_checkpoint_contents(tmpdir):\n \"\"\"Tests that the save_last checkpoint contains the latest information.\"\"\"\n seed_everything(100)\n model = EvalModelTemplate()\n num_epochs = 3\n model_checkpoint = ModelCheckpoint(filepath=tmpdir, save_top_k=num_epochs, save_last=True)\n trainer = Trainer(\n default_root_dir=tmpdir,\n early_stop_callback=False,\n checkpoint_callback=model_checkpoint,\n max_epochs=num_epochs,\n )\n trainer.fit(model)\n\n path_last_epoch = model_checkpoint.format_checkpoint_name(num_epochs - 1, {})\n assert path_last_epoch != model_checkpoint.last_model_path\n\n ckpt_last_epoch = torch.load(path_last_epoch)\n ckpt_last = torch.load(model_checkpoint.last_model_path)\n assert all(ckpt_last_epoch[k] == ckpt_last[k] for k in (\"epoch\", \"global_step\"))\n assert all(\n ckpt_last[\"callbacks\"][type(model_checkpoint)][k] == ckpt_last_epoch[\"callbacks\"][type(model_checkpoint)][k]\n for k in (\"best_model_score\", \"best_model_path\")\n )\n\n # it is easier to load the model objects than to iterate over the raw dict of tensors\n model_last_epoch = EvalModelTemplate.load_from_checkpoint(path_last_epoch)\n model_last = EvalModelTemplate.load_from_checkpoint(model_checkpoint.last_model_path)\n for w0, w1 in zip(model_last_epoch.parameters(), model_last.parameters()):\n assert w0.eq(w1).all()\n\n\ndef test_ckpt_metric_names(tmpdir):\n model = EvalModelTemplate()\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n gradient_clip_val=1.0,\n overfit_batches=0.20,\n progress_bar_refresh_rate=0,\n limit_train_batches=0.01,\n limit_val_batches=0.01,\n checkpoint_callback=ModelCheckpoint(filepath=tmpdir + \"/{val_loss:.2f}\"),\n )\n\n 
trainer.fit(model)\n\n # make sure the checkpoint we saved has the metric in the name\n ckpts = os.listdir(tmpdir)\n ckpts = [x for x in ckpts if \"val_loss\" in x]\n assert len(ckpts) == 1\n val = re.sub(\"[^0-9.]\", \"\", ckpts[0])\n assert len(val) > 3\n\n\ndef test_ckpt_metric_names_results(tmpdir):\n model = EvalModelTemplate()\n model.training_step = model.training_step_result_obj\n model.training_step_end = None\n model.training_epoch_end = None\n\n model.validation_step = model.validation_step_result_obj\n model.validation_step_end = None\n model.validation_epoch_end = None\n\n trainer = Trainer(\n default_root_dir=tmpdir,\n max_epochs=1,\n gradient_clip_val=1.0,\n overfit_batches=0.20,\n progress_bar_refresh_rate=0,\n limit_train_batches=0.01,\n limit_val_batches=0.01,\n checkpoint_callback=ModelCheckpoint(filepath=tmpdir + \"/{val_loss:.2f}\"),\n )\n\n trainer.fit(model)\n\n # make sure the checkpoint we saved has the metric in the name\n ckpts = os.listdir(tmpdir)\n ckpts = [x for x in ckpts if \"val_loss\" in x]\n assert len(ckpts) == 1\n val = re.sub(\"[^0-9.]\", \"\", ckpts[0])\n assert len(val) > 3\n"
] |
[
[
"torch.load"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
hebpmo/TMA
|
[
"b07747d3112e822ff92dd2ba4589d2288adab154",
"b07747d3112e822ff92dd2ba4589d2288adab154"
] |
[
"tma/collector/xhn.py",
"tma/collector/sse.py"
] |
[
"# -*- coding: UTF-8 -*-\n\n\"\"\"\ncollector.xhn - 新华网数据采集\n\n官网:http://www.xinhuanet.com/\n\n接口分析:\n1. 获取文章列表\nhttp://qc.wa.news.cn/nodeart/list?nid=115093&pgnum=1&cnt=10000\n\n新华全媒体头条\nhttp://www.xinhuanet.com/politics/qmtt/index.htm\n====================================================================\n\"\"\"\n\nimport requests\nimport re\nfrom datetime import datetime\nfrom bs4 import BeautifulSoup\nfrom zb.crawlers.utils import get_header\nimport traceback\nimport pandas as pd\nfrom tqdm import tqdm\n\nimport tma\n\nhome_url = \"http://www.xinhuanet.com/\"\n\n\ndef get_website_map():\n wzdt_url = \"http://www.xinhuanet.com/wzdt2014.htm\"\n html = requests.get(wzdt_url, headers=get_header())\n bsobj = BeautifulSoup(html.content.decode('utf-8'), 'lxml')\n map_raw = bsobj.find('div', {'class': \"content_left\"})\n raise NotImplementedError\n\n\ndef get_special_topics(pgnum=1):\n \"\"\"获取专题列表\"\"\"\n url = \"http://qc.wa.news.cn/nodeart/list?\" \\\n \"nid=115093&pgnum=%s&cnt=200\" % str(pgnum)\n res = requests.get(url).text\n res = res.replace(\"null\", \"\\'\\'\")\n res = eval(res)\n assert res['status'] == 0, \"获取文章列表失败\"\n data = res['data']['list']\n specials = []\n for a in data:\n special = {\n \"Abstract\": a['Abstract'],\n \"Author\": a['Author'],\n \"LinkUrl\": a['LinkUrl'],\n \"PubTime\": a['PubTime'],\n \"Title\": a['Title'],\n \"allPics\": a['allPics'],\n }\n specials.append(special)\n return specials\n\n\ndef get_article_detail(article_url):\n \"\"\"获取新华网article_url中的文章内容\n\n :param article_url: 文章url\n :return:\n {\n \"url\": article_url,\n \"title\": title,\n \"pub_time\": pub_time,\n \"source\": source,\n \"content\": content\n }\n \"\"\"\n # article_url = \"http://www.xinhuanet.com/fortune/2018-06/20/c_129897476.htm\"\n html = requests.get(article_url, headers=get_header())\n bsobj = BeautifulSoup(html.content.decode('utf-8'), 'lxml')\n\n # 解析标题\n cols = bsobj.find('div', {\"class\": \"h-news\"}).text.strip().split(\"\\r\\n\")\n title = 
cols[0].strip()\n pub_time = cols[1].strip()\n source = cols[-1].strip()\n\n # 解析内容\n content = bsobj.find('div', {\"id\": \"p-detail\"}).text.strip()\n content = content.replace(\"\\u3000\\u3000\", \"\")\n content = [x.strip() for x in content.split(\"\\n\")]\n content = [x for x in content if x != \"\"]\n content = \"\\n\".join(content)\n\n return {\n \"url\": article_url,\n \"title\": title,\n \"pub_time\": pub_time,\n \"source\": source,\n \"content\": content\n }\n\n\nclass HomePage(object):\n \"\"\"新华网首页\"\"\"\n\n def __init__(self):\n self.home_url = \"http://www.xinhuanet.com/\"\n\n @staticmethod\n def _get_date_from_url(url):\n pat = re.compile(\"(\\d{4}-\\d{2}[/-]\\d{2})\")\n res = pat.findall(url)\n if res is not None and len(res) == 1:\n return res[0].replace('/', \"-\")\n else:\n return None\n\n def get_article_list(self, d=None):\n \"\"\"获取首页的头条文章列表\"\"\"\n html = requests.get(self.home_url, headers=get_header())\n bsobj = BeautifulSoup(html.content.decode('utf-8'), 'lxml')\n\n a_list = []\n for a in bsobj.find_all(\"a\"):\n try:\n url = a['href']\n title = a.text.strip()\n date_ = self._get_date_from_url(url)\n a_list.append([url, title, date_])\n except:\n if tma.DEBUG:\n traceback.print_exc()\n continue\n\n a_list = [a for a in a_list if\n a[0] != \"\"\n and a[0].strip(\"/\") != \"http://xhgy.xinhuanet.com\"\n and a[0].startswith(\"http\")\n and a[1] != \"\"\n and a[1] != \"视频MP4地址\"\n and \"c_\" in a[0]\n and a[2] != \"\"\n # and 'photo' not in a[0]\n # and 'video' not in a[0]\n ]\n\n # 根据url去重\n df = pd.DataFrame(a_list, columns=['url', 'title', 'date'])\n df.drop_duplicates('url', inplace=True)\n res = [list(x) for x in list(df.values)]\n\n if d is None:\n date_list = [datetime.now().date().__str__()]\n else:\n date_list = d\n res = [a for a in res if a[2] in date_list]\n res = sorted(res, key=lambda x: x[2], reverse=True)\n return res\n\n def get_articles(self, d=None):\n \"\"\"获取首页文章内容\n\n :param d: list\n 限定获取文章的日期,默认是当日日期,可以指定多个离散的日期\n 
:return: list\n \"\"\"\n # 获取首页文章列表URL、按发布日期过滤、按URL去重\n res = self.get_article_list(d)\n a_list = [a[0] for a in res]\n a_list = list(set(a_list))\n\n articles = []\n for a in tqdm(a_list, ncols=100, desc=\"xhn.get_articles\"):\n try:\n article = get_article_detail(a)\n articles.append(article)\n except:\n if tma.DEBUG:\n traceback.print_exc()\n return articles\n\n\nclass Fortune(object):\n def __init__(self):\n self.url1 = \"http://www.xinhuanet.com/fortune/\"\n self.url2 = \"http://www.xinhuanet.com/fortune/caiyan.htm\"\n self.url3 = \"http://www.xinhuanet.com/fortune/cfx.htm\"\n self.url4 = \"http://www.xinhuanet.com/fortune/bcxc.htm\"\n",
"# -*- coding: UTF-8 -*-\n\n\"\"\"\ncollector.sse - 采集上海证券交易所的数据\n\n官网:http://www.sse.com.cn/\n====================================================================\n\"\"\"\n\nimport requests\nimport pandas as pd\n\n\ndef get_sh_indexes():\n \"\"\"获取上海证券交易所所有指数的实时行情\"\"\"\n url = \"http://www.sse.com.cn/js/common/indexQuotes.js\"\n res = requests.get(url).text\n lines = res.split(\"\\n\")\n lines = [x.replace('_t.push(', \"\").strip(\");'\") for x in lines if \"_t.push(\" in x]\n lines = [\n eval(line, type('Dummy', (dict,), dict(__getitem__=lambda s, n: n))())\n for line in lines\n ]\n\n index_sh = pd.DataFrame(lines)\n index_sh = index_sh[['JC', 'ZSDM', 'abbr', 'ZRSP', 'DRKP', 'DRSP', 'DRZD', 'DRZX', 'ZDF']]\n index_sh = index_sh.rename(columns={\n 'JC': 'name',\n 'ZSDM': 'code',\n 'abbr': 'kind',\n 'ZRSP': 'preclose',\n 'DRKP': 'open',\n 'DRSP': 'close',\n 'DRZD': 'high',\n 'DRZX': 'low',\n 'ZDF': 'change'\n })\n\n # index_sh.astype()\n return index_sh\n"
] |
[
[
"pandas.DataFrame"
],
[
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
BBarbara-fr/pySPM
|
[
"6dfd59b0e873173c455b1085e091495cf775f852",
"6dfd59b0e873173c455b1085e091495cf775f852"
] |
[
"pySPM/tools/spectra.py",
"pySPM/ITAslicer.py"
] |
[
"from PyQt5.QtWidgets import QMainWindow, QShortcut, QApplication, QFileDialog, QSizePolicy, QTableWidgetItem\nfrom PyQt5.QtCore import Qt, QSettings, QDir, QFileInfo\nfrom pySPM.tools.spectraviewer import Ui_SpectraViewer\nimport pySPM\nimport sys\nimport numpy as np\nimport os\nimport struct\n\nDPI = 100.0\nclass SpectraViewer(QMainWindow):\n def __init__(self, filename=None, parent=None):\n super(SpectraViewer, self).__init__(parent)\n self.ui = Ui_SpectraViewer()\n self.ui.setupUi(self)\n self.sf = 7200\n self.k0 = 0\n self.dsf = 0\n self.dk0 = 0\n self.ita = None\n self.fig = self.ui.mpl.canvas.fig\n self.canvas = self.ui.mpl.canvas\n self.ax = self.fig.add_subplot(111)\n self.nextMass = QShortcut(Qt.Key_Plus, self)\n self.prevMass = QShortcut(Qt.Key_Minus, self)\n self.nextMass.activated.connect(self.next_mass)\n self.prevMass.activated.connect(self.prev_mass)\n self.ui.pushButton_2.clicked.connect(self.toggleMassCal)\n self.ui.pushButton.clicked.connect(self.removeMassCalItem)\n self.ui.show_mass.clicked.connect(self.yAxisScaleChanged)\n self.canvas.mpl_connect('motion_notify_event', self.on_motion)\n self.canvas.mpl_connect('button_press_event', self.onMousePress)\n self.canvas.mpl_connect('button_release_event', self.onMouseRelease)\n self.canvas.mpl_connect(\"scroll_event\", self.scrolling)\n self.labels = []\n self.action = None\n self.lab_lines = []\n self.MassCal = []\n self.open(filename)\n\n def resizeEvent(self, event):\n return\n sx, sy = self.ui.fig.width()/DPI, self.ui.fig.height()/DPI\n self.fig.set_size_inches(sx, sy)\n self.ui.fig.updateGeometry()\n self.canvas.updateGeometry()\n self.canvas.draw()\n self.canvas.flush_events()\n\n def __del__(self):\n if self.ita is not None:\n del self.ita\n self.ita = None\n\n def removeMassCalItem(self):\n row = self.ui.tableMassCal.selectedItems()[0].row()\n del self.MassCal[row]\n self.DoMassCal()\n \n def refresh(self):\n r = self.ax.get_xlim()\n self.yAxisScaleChanged()\n self.canvas.draw()\n 
self.canvas.flush_events()\n\n def next_mass(self):\n r = self.ax.get_xlim()\n self.ax.set_xlim(r[0]+1, r[1]+1)\n self.refresh()\n\n def prev_mass(self):\n r = self.ax.get_xlim()\n self.ax.set_xlim(r[0]-1, r[1]-1)\n self.refresh()\n\n def clear_labels(self):\n for x in self.labels:\n x.remove()\n self.labels[:] = []\n for x in self.lab_lines:\n self.ax.lines.remove(x)\n self.lab_lines[:] = []\n\n def plot_labels(self, colors=['r','g','b']):\n r = self.ax.get_xlim() \n E = []\n for nm in range(int(np.round(r[0],0)), int(np.round(r[1],0))+1):\n E += pySPM.utils.get_peaklist(nm, self.ita.polarity=='Negative')\n m0s = [pySPM.utils.get_mass(x) for x in E]\n P = list(zip(m0s, E))\n P.sort(key=lambda x: x[0])\n y = self.ax.get_ylim()[1]\n for i, (mi, Ei) in enumerate(P):\n dmi = 2*np.sqrt(mi)*np.sqrt((self.dk0**2/(self.sf**2))+mi*(self.dsf**2/(self.sf**2)))\n col = colors[i%len(colors)]\n self.lab_lines.append(self.ax.axvline(mi, color=col, alpha=.5))\n self.labels.append(self.ax.annotate(Ei, (mi, y), (5, 0), rotation=90, va='top', ha='left', textcoords='offset pixels'))\n if dmi>0:\n self.lab_lines.append(self.ax.axvline(mi+dmi, color=col, alpha=.5, ls=':'))\n self.lab_lines.append(self.ax.axvline(mi-dmi, color=col, alpha=.5, ls=':'))\n\n def yAxisScaleChanged(self):\n r = self.ax.get_xlim()\n delta = r[1]-r[0]\n self.clear_labels() \n \n if self.ita is not None:\n SatLevel = self.ita.size['pixels']['x']*self.ita.size['pixels']['y']*self.ita.Nscan\n self.sat_level.set_ydata(SatLevel)\n \n max = 0\n left = int(pySPM.utils.mass2time(r[0], sf=self.sf, k0=self.k0)/2)\n right = int(pySPM.utils.mass2time(r[1], sf=self.sf, k0=self.k0)/2)+1\n if left<0:\n left = 0\n if right >= len(self.S):\n right = len(self.S)-1\n if left<self.t[-1] and right>0:\n max = np.max(self.S[left:right+1])\n if max>0:\n self.ax.set_ylim(0, 1.2*max)\n if delta<10:\n self.ui.show_mass.setEnabled(True)\n if self.ui.show_mass.isChecked():\n self.plot_labels()\n else:\n 
self.ui.show_mass.setEnabled(False)\n m0 = pySPM.utils.time2mass(left+right, self.sf, self.k0)\n dm = 2*np.sqrt(m0)*np.sqrt((self.dk0**2/(self.sf**2))+m0*(self.dsf**2/(self.sf**2)))\n self.ui.lab_m0.setText(\"m0 = {:.5f} ± {:.5f}\".format(m0,dm))\n\n def scrolling(self, event):\n r = self.ax.get_xlim()\n delta = (r[1]-r[0])\n m0 = event.xdata\n zfact = 2\n if event.button ==\"down\":\n zfact = 1/zfact\n low = m0-(m0-r[0])*zfact\n high = m0+(r[1]-m0)*zfact\n self.ax.set_xlim((low, high))\n self.refresh()\n \n def open(self, t_filename=None):\n settings = QSettings(QSettings.IniFormat, QSettings.UserScope, \"pySPM\", \"pySPM\")\n if t_filename is None:\n home = QDir.cleanPath(os.getenv(\"HOMEPATH\"))\n path = settings.value(\"lastPath\", home)\n self.filename = QFileDialog.getOpenFileName(None, \"Choose measurement file\", path, \"*.ita\")[0]\n else:\n self.filename = t_filename\n check_file = QFileInfo(self.filename)\n self.setWindowTitle(check_file.fileName())\n if not check_file.exists() or not check_file.isFile():\n return\n \n settings.setValue(\"lastPath\", check_file.path())\n self.ita = pySPM.ITA(self.filename, readonly=False)\n self.t, self.S = self.ita.getSpectrum(time=True)\n self.sf, self.k0 = self.ita.get_mass_cal()\n self.mass = pySPM.utils.time2mass(self.t, self.sf, self.k0)\n self.spec = self.ax.plot(self.mass, self.S)[0]\n SatLevel = self.ita.size['pixels']['x']*self.ita.size['pixels']['y']*self.ita.Nscan\n self.sat_level = self.ax.axhline(SatLevel, color='r')\n if 'pySPM' in self.ita.root.goto(\"MassScale\"):\n self.MassCal = []\n N = self.ita.root.goto(\"MassScale/pySPM/N\").get_ulong()\n for i in range(N):\n elt = self.ita.root.goto(\"MassScale/pySPM/\"+str(i)+\"/elt\").value.decode('utf8')\n mass = self.ita.root.goto(\"MassScale/pySPM/\"+str(i)+\"/mass\").get_double()\n time = self.ita.root.goto(\"MassScale/pySPM/\"+str(i)+\"/time\").get_double()\n self.MassCal.append(dict(elt=elt, mass=mass, time=time))\n else:\n self.MassCal = []\n for x in 
self.ita.root.goto(\"MassScale/calib\"):\n if x.name == 'assign':\n self.MassCal.append({'elt':x.get_string()})\n if x.name == 'mcp':\n mcp = struct.unpack(\"<10d\", x.value)\n self.MassCal[-1]['time'] = mcp[0]\n self.MassCal[-1]['mass'] = mcp[1]\n self.DoMassCal() \n \n def get_mass(self, formula):\n if self.ita is not None and not (formula.endsWith('+') or formula.endsWith('-')):\n if self.ita.polarity=='Negative':\n pol = \"-\"\n else:\n pol = \"+\"\n formula = formula + pol\n return pySPM.utils.get_mass(formula) \n\n def plotSpectra(self):\n self.mass = pySPM.utils.time2mass(self.t, self.sf, self.k0)\n self.spec.set_xdata(self.mass)\n self.refresh()\n\n def onMousePress(self, event):\n if event.button == 3:\n x = event.xdata\n \n i = pySPM.utils.closest_arg(self.mass, x)\n last = i-1;\n while i!=last:\n last = i\n i = i-10+np.argmax(self.S[i-10:i+10])\n I = self.S[i]\n self.MassCal.append(dict(time=self.t[i]))\n print(\"clicked @{}u (t={})\".format(x, self.t[i]))\n elif event.button == Qt.LeftButton:\n self.action = ('move', event.xdata)\n else:\n print(event)\n\n def on_motion(self, event):\n r = self.ax.get_xlim()\n if self.action is not None:\n if self.action[0] == 'move':\n if event.xdata is not None:\n delta = r[1]-r[0]\n dx = event.xdata-self.action[1]\n self.ax.set_xlim((r[0]-dx,r[1]-dx))\n self.refresh()\n\n def onMouseRelease(self, event):\n if event.button == 3:\n x = event.xdata\n r = self.ax.get_xlim() \n frags = []\n for nm in range(int(np.round(r[0],0)), int(np.round(r[1],0))+1):\n frags += pySPM.utils.get_peaklist(nm, self.ita.polarity=='Negative')\n masses = np.array([pySPM.utils.get_mass(x) for x in frags])\n dm = masses-x\n i = np.argmin(np.abs(dm))\n self.MassCal[-1]['mass'] = masses[i]\n self.MassCal[-1]['elt'] = frags[i]\n print(\"assigned to {}\".format(frags[i]))\n self.DoMassCal()\n self.action = None\n self.refresh()\n\n def save_mass_cal(self):\n root = self.ita.root.goto(\"MassScale\")\n for i, mcp in enumerate(self.MassCal):\n for 
key in mcp:\n if key in ['elt']:\n data = mcp[key].encode('utf8') + b'\\x00'*(256-len(mcp[key]))\n root.edit_block(\"pySPM/\"+str(i), key, data)\n else:\n root.edit_block(\"pySPM/\"+str(i), key, struct.pack(\"<d\", mcp[key]))\n root.edit_block(\"pySPM\", \"N\", struct.pack(\"<I\", len(self.MassCal)))\n \n def DoMassCal(self):\n ts = [x['time'] for x in self.MassCal]\n ms = [x['mass'] for x in self.MassCal]\n if len(ts)>1:\n self.sf, self.k0, self.dsf, self.dk0 = pySPM.utils.fitSpectrum(ts, ms, error=True)\n else:\n self.k0 = ts[0]-self.sf*np.sqrt(ms[0])\n dsf = 0\n dk0 = 0\n self.ui.lab_k0.setText(\"k0 = {} ± {}\".format(self.k0, self.dk0))\n self.ui.lab_sf.setText(\"sf = {} ± {}\".format(self.sf, self.dsf))\n \n self.ita.setK0(self.k0)\n self.ita.setSF(self.sf)\n \n self.ui.tableMassCal.clearContents()\n self.ui.tableMassCal.setRowCount(len(self.MassCal))\n for i in range(len(self.MassCal)):\n self.ui.tableMassCal.setItem(i, 0, QTableWidgetItem(self.MassCal[i]['elt']))\n m = pySPM.utils.time2mass(self.MassCal[i]['time'], self.sf, self.k0)\n self.ui.tableMassCal.setItem(i, 1, QTableWidgetItem(\"{:.3f}\".format(self.MassCal[i]['mass'])))\n self.ui.tableMassCal.setItem(i, 2, QTableWidgetItem(\"{:.0f}\".format(self.MassCal[i]['time'])))\n delta = \"{:.6f}\".format(m-self.MassCal[i]['mass'])\n self.ui.tableMassCal.setItem(i, 3, QTableWidgetItem(delta)) \n self.mass = pySPM.utils.time2mass(self.t, self.sf, self.k0)\n self.save_mass_cal()\n self.plotSpectra()\n \n def toggleMassCal(self):\n vis = not self.ui.tableMassCal.isVisible()\n self.ui.tableMassCal.setVisible(vis)\n self.ui.pushButton.setVisible(vis)\n if vis:\n self.ui.pushButton_2.setText(\"«\")\n else:\n self.ui.pushButton_2.setText(\"»\")\n \n def closeEvent(self, event):\n print(\"Closing spectra tool...\")\n del self.ita\n \ndef main(filename=None):\n if filename is None and len(sys.argv)>1:\n filename = sys.argv[1]\n print(\"Loading file \\\"{}\\\"\".format(filename))\n app = QApplication(sys.argv)\n window 
= SpectraViewer(filename)\n window.show()\n sys.exit(app.exec_())\n \nif __name__ == '__main__':\n main()\n",
"# -- coding: utf-8 --\n\n# Copyright 2018 Olivier Scholder <[email protected]>\n\n\"\"\"\nThis is a standalone script which allows the user to perfome cross-section on ToF-SIMS images on different channels\n\"\"\"\n\nimport sys\nfrom PyQt5.QtWidgets import QApplication, QMainWindow, QTableWidgetItem, QFileDialog, QWidget, QAction, QProgressBar, QStatusBar\nfrom PyQt5.QtGui import QPixmap\nfrom PyQt5.QtCore import Qt, QCoreApplication\nfrom slicer import Ui_slicer\nimport os\nimport re\nimport pySPM\nimport numpy as np\nfrom pySPM.utils import CDF\nfrom scipy import optimize as opt\n\nclass SlicerApp(QWidget):\n def __init__(self, filename=None):\n super(QWidget, self).__init__()\n self.ui = Ui_slicer()\n self.ui.setupUi(self)\n \n self.canvas = self.ui.mpl.canvas\n self.fig = self.ui.mpl.canvas.fig\n self.initPlotLayout()\n self.level = None\n if filename is None:\n if len(sys.argv) < 2:\n self.path, _ = QFileDialog.getOpenFileName(self,\"ITA Image\", \"\",\"(*.ITA)\")\n else:\n # If an argument is sent to the script, the first argument will be used as a Path. 
Very usefull for debugging the script without having to selectr the folder each time with window dialog\n self.path = sys.argv[1]\n else:\n self.path = filename\n if not os.path.exists(self.path):\n raise Exception(\"File \\\"{}\\\" is not found\".format(self.path))\n if os.path.exists(self.path+\".level.npy\"):\n self.level = np.load(self.path+\".level.npy\")\n self.curs = [0,0,0]\n self.volume = None\n self.ITA = pySPM.ITA(self.path) \n for i,x in enumerate(self.ITA.get_masses()):\n self.ui.peakList.setRowCount(i+1)\n self.ui.peakList.setItem(i, 0, QTableWidgetItem(x['assign']))\n self.ui.peakList.setItem(i, 1, QTableWidgetItem(\"{:.2f}u\".format((x['cmass']))))\n self.ui.peakList.setItem(i, 2, QTableWidgetItem(\"{:.2f}u\".format(x['umass'] - x['lmass'])))\n self.ui.peakList.show()\n self.ui.cmap.currentIndexChanged.connect(self.plot)\n self.ui.prof1daxis.currentIndexChanged.connect(self.plot)\n self.ui.peakList.cellClicked.connect(self.load_channel)\n self.canvas.mpl_connect('button_press_event', self.on_pick)\n self.flatAction = QAction(\"Flatten substrate from this channel\")\n self.flatAction.triggered.connect(self.flatten)\n self.ui.correction.stateChanged.connect(self.plot)\n self.ui.peakList.addAction(self.flatAction)\n self.ui.status.setText(\"IDLE\")\n \n def flatline(self, y):\n for x in range(self.volume.shape[1]):\n popt, pcov = opt.curve_fit(CDF, np.arange(self.volume.shape[2]), self.volume[y,x,:], (10, self.volume.shape[2]/2, 1))\n self.level[y, x] = popt[1]\n \n def flatten(self):\n from scipy import optimize as opt\n self.ui.status.setText(\"Start the flattening...\")\n self.level = np.zeros(self.volume.shape[:2])\n self.ui.pb.setMaximum(self.volume.shape[0])\n for y in range(self.volume.shape[0]):\n self.ui.pb.setValue(y)\n self.flatline(y)\n QCoreApplication.processEvents()\n self.ax.clear()\n self.ax.imshow(self.level)\n self.canvas.draw()\n self.ui.pb.setValue(0)\n self.ui.status.setText(\"Flattening finished\")\n np.save(self.path+\".level\", 
self.level)\n \n def load_channel(self, row, col):\n self.ui.status.setText(\"Loading channel...\")\n id = row\n vol = []\n for i in range(self.ITA.Nscan):\n x = self.ITA.getImage(id, i)\n vol.append(x)\n self.volume = np.stack([x for x in vol], axis=2)\n if not self.level is None:\n self.corrected = np.zeros(self.volume.shape)\n z = np.arange(self.ITA.Nscan)\n self.ui.pb.setMaximum(self.level.shape[0])\n for y in np.arange(self.level.shape[0]):\n self.ui.pb.setValue(y)\n for x in np.arange(self.level.shape[1]):\n dz = int(-self.level[y,x] + np.max(self.level))\n self.corrected[y,x,dz:] = self.volume[y,x,:self.volume.shape[2]-dz]\n self.ui.pb.setValue(0)\n self.plot()\n self.ui.status.setText(\"IDLE\")\n \n def plot(self):\n if self.ui.correction.isChecked():\n A = self.corrected\n else:\n A = self.volume\n cmap = self.ui.cmap.currentText()\n self.axXY.clear()\n self.axXZ.clear()\n self.axYZ.clear()\n self.ax.clear()\n if self.volume is None:\n return\n self.axXY.imshow(A[:,:,self.curs[2]],cmap=cmap)\n self.axXZ.imshow(A[self.curs[1],:,:].T,cmap=cmap)\n self.axYZ.imshow(A[:,self.curs[0],:].T,cmap=cmap)\n self.axXY.axhline(self.curs[1])\n self.axXY.axvline(self.curs[0])\n self.axXZ.axhline(self.curs[2])\n self.axXZ.axvline(self.curs[0])\n self.axYZ.axhline(self.curs[2])\n self.axYZ.axvline(self.curs[1])\n self.axXY.set_title(\"XY\")\n self.axXZ.set_title(\"XZ\")\n self.axYZ.set_title(\"YZ\")\n if self.ui.prof1daxis.currentText() in 'XYZ':\n i = 'XYZ'.index(self.ui.prof1daxis.currentText())\n self.ax.set_xlabel(\"XYZ\"[i])\n if i==0:\n self.ax.plot(A[:,self.curs[0],self.curs[2]])\n elif i==1:\n self.ax.plot(A[self.curs[1],:,self.curs[2]])\n elif i==2:\n self.ax.plot(A[self.curs[1],self.curs[0],:])\n self.canvas.draw()\n \n def on_pick(self, event):\n if not event.inaxes in [self.axYZ,self.axXZ,self.axXY]:\n return\n x = event.xdata\n y = event.ydata\n axis = [self.axYZ,self.axXZ,self.axXY].index(event.inaxes)\n xdata = int(x)\n ydata = int(y)\n \n if event.inaxes == 
self.axXY:\n self.curs[0] = xdata\n self.curs[1] = ydata\n elif event.inaxes == self.axYZ:\n self.curs[1] = xdata\n self.curs[2] = ydata\n elif event.inaxes == self.axXZ:\n self.curs[0] = xdata\n self.curs[2] = ydata\n else:\n print(\"Click event not handled\")\n self.curs = [np.clip(0,self.curs[i],self.volume.shape[i]-1) for i in range(3)]\n self.plot()\n \n def initPlotLayout(self):\n \"\"\"\n Setup the plotting layout.\n \"\"\"\n self.axXY = self.fig.add_subplot(2,2,1)\n self.axYZ = self.fig.add_subplot(2,2,2)\n self.axXZ = self.fig.add_subplot(2,2,3)\n self.ax = self.fig.add_subplot(2,2,4)\n self.fig.tight_layout()\n\napp = QApplication(sys.argv)\nwindow = SlicerApp()\nwindow.show()\nsys.exit(app.exec_())"
] |
[
[
"numpy.sqrt",
"numpy.abs",
"numpy.round",
"numpy.max",
"numpy.argmax"
],
[
"numpy.clip",
"numpy.arange",
"numpy.save",
"numpy.stack",
"numpy.max",
"numpy.load",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
BenjaminWegener/tensorflow-directml
|
[
"ecdbdbd2691e17dcc462fc49138fc7cc1536b7d5"
] |
[
"tensorflow/python/kernel_tests/segment_reduction_ops_test.py"
] |
[
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Functional tests for segment reduction ops.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport itertools\n\nimport numpy as np\n\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import constant_op\nfrom tensorflow.python.framework import dtypes as dtypes_lib\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import test_util\nfrom tensorflow.python.ops import gradient_checker\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import test\n\n\nclass SegmentReductionHelper(test.TestCase):\n\n def _input(self, input_shape, dtype=dtypes_lib.int32):\n num_elem = 1\n for x in input_shape:\n num_elem *= x\n values = np.arange(1, num_elem + 1)\n np_values = values.reshape(input_shape).astype(dtype.as_numpy_dtype)\n # Add a non-zero imaginary component to complex types.\n if dtype.is_complex:\n np_values -= 1j * np_values\n return constant_op.constant(\n np_values, shape=input_shape, dtype=dtype), np_values\n\n def _segmentReduce(self, indices, x, op1, op2=None, num_segments=None,\n initial_value=0):\n if not x.size:\n return np.array([])\n indices = np.asarray(indices)\n 
if num_segments is None:\n num_segments = indices[-1] + 1\n output = [None] * num_segments\n slice_shape = x.shape[indices.ndim:]\n x_flat = x.reshape((indices.size,) + slice_shape)\n for i, index in enumerate(indices.ravel()):\n if (output[index] is not None) and op1 == np.max:\n for j in range(0, output[index].shape[0]):\n output[index][j] = op1([output[index][j], x_flat[i][j]])\n elif output[index] is not None:\n output[index] = op1(output[index], x_flat[i])\n else:\n output[index] = x_flat[i]\n # zero initialize values that are still uncalcuated.\n initial_value_slice = np.ones(slice_shape) * initial_value\n output = [o if o is not None else initial_value_slice for o in output]\n if op2 is not None:\n output = [op2(o) for o in output]\n output = [o.reshape(slice_shape) for o in output]\n return np.array(output)\n\n def _mean_cum_op(self, x, y):\n return (x[0] + y, x[1] + 1) if isinstance(x, tuple) else (x + y, 2)\n\n def _mean_reduce_op(self, x):\n return x[0] / x[1] if isinstance(x, tuple) else x\n\n def _sqrt_n_reduce_op(self, x):\n return x[0] / np.sqrt(x[1]) if isinstance(x, tuple) else x\n\n\nclass SegmentReductionOpTest(SegmentReductionHelper):\n\n def testValues(self):\n dtypes = [\n dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,\n dtypes_lib.int32, dtypes_lib.complex64, dtypes_lib.complex128\n ]\n\n # Each item is np_op1, np_op2, tf_op\n ops_list = [(np.add, None, math_ops.segment_sum),\n (self._mean_cum_op, self._mean_reduce_op,\n math_ops.segment_mean),\n (np.ndarray.__mul__, None, math_ops.segment_prod),\n (np.minimum, None, math_ops.segment_min),\n (np.maximum, None, math_ops.segment_max)]\n\n # A subset of ops has been enabled for complex numbers\n complex_ops_list = [(np.add, None, math_ops.segment_sum),\n (np.ndarray.__mul__, None, math_ops.segment_prod),\n (self._mean_cum_op, self._mean_reduce_op,\n math_ops.segment_mean)]\n\n n = 10\n shape = [n, 2]\n indices = [i // 3 for i in range(n)]\n for dtype in dtypes:\n if dtype in 
(dtypes_lib.complex64, dtypes_lib.complex128):\n curr_ops_list = complex_ops_list\n else:\n curr_ops_list = ops_list\n for use_gpu in [True, False]:\n with self.cached_session(use_gpu=use_gpu):\n tf_x, np_x = self._input(shape, dtype=dtype)\n for np_op1, np_op2, tf_op in curr_ops_list:\n np_ans = self._segmentReduce(indices, np_x, np_op1, np_op2)\n s = tf_op(data=tf_x, segment_ids=indices)\n tf_ans = self.evaluate(s)\n self.assertAllClose(np_ans, tf_ans)\n # NOTE(mrry): The static shape inference that computes\n # `tf_ans.shape` can only infer that sizes from dimension 1\n # onwards, because the size of dimension 0 is data-dependent\n # and may therefore vary dynamically.\n self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])\n\n @test_util.run_deprecated_v1\n def testSegmentIdsShape(self):\n shape = [4, 4]\n tf_x, _ = self._input(shape)\n indices = constant_op.constant([0, 1, 2, 2], shape=[2, 2])\n with self.assertRaises(ValueError):\n math_ops.segment_sum(data=tf_x, segment_ids=indices)\n\n @test_util.run_deprecated_v1\n def testSegmentIdsSize(self):\n shape = [4, 4]\n for use_gpu in [True, False]:\n with self.cached_session(use_gpu=use_gpu):\n tf_x, _ = self._input(shape)\n indices = [0, 1]\n s = math_ops.segment_sum(data=tf_x, segment_ids=indices)\n with self.assertRaisesOpError(\"segment_ids should be the same size\"):\n self.evaluate(s)\n\n @test_util.run_deprecated_v1\n def testSegmentIdsValid(self):\n # This is a baseline for the following SegmentIdsInvalid* tests.\n shape = [4, 4]\n for use_gpu in [True, False]:\n with self.cached_session(use_gpu=use_gpu):\n tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)\n indices = [0, 0, 0, 1]\n result = math_ops.segment_sum(data=tf_x, segment_ids=indices).eval()\n self.assertAllEqual([[15, 18, 21, 24], [13, 14, 15, 16]], result)\n\n def testSegmentIdsGreaterThanZero(self):\n shape = [4, 4]\n for use_gpu in [True, False]:\n with self.cached_session(use_gpu=use_gpu):\n tf_x, np_x = self._input(shape, 
dtype=dtypes_lib.float32)\n indices = [1, 1, 2, 2]\n np_ans = self._segmentReduce(indices, np_x, np.add)\n s = math_ops.segment_sum(data=tf_x, segment_ids=indices)\n tf_ans = self.evaluate(s)\n self.assertAllClose(np_ans, tf_ans)\n\n def testSegmentIdsHole(self):\n shape = [4, 4]\n for use_gpu in [True, False]:\n with self.cached_session(use_gpu=use_gpu):\n tf_x, np_x = self._input(shape, dtype=dtypes_lib.float32)\n indices = [0, 0, 3, 3]\n np_ans = self._segmentReduce(indices, np_x, np.add)\n s = math_ops.segment_sum(data=tf_x, segment_ids=indices)\n tf_ans = self.evaluate(s)\n self.assertAllClose(np_ans, tf_ans)\n\n @test_util.run_deprecated_v1\n def testSegmentIdsInvalid1(self):\n shape = [4, 4]\n with self.cached_session():\n tf_x, _ = self._input(shape)\n indices = [-1, -1, 0, 0]\n s = math_ops.segment_sum(data=tf_x, segment_ids=indices)\n with self.assertRaisesOpError(\n r\"Segment id -1 out of range \\[0, 1\\), possibly because \"\n \"'segment_ids' input is not sorted.\"):\n self.evaluate(s)\n\n @test_util.run_deprecated_v1\n def testSegmentIdsInvalid2(self):\n shape = [4, 4]\n with self.cached_session():\n tf_x, _ = self._input(shape)\n indices = [0, 1, 0, 1]\n s = math_ops.segment_sum(data=tf_x, segment_ids=indices)\n with self.assertRaisesOpError(\"segment ids are not increasing\"):\n self.evaluate(s)\n\n @test_util.run_deprecated_v1\n def testSegmentIdsInvalid3(self):\n shape = [4, 4]\n with self.cached_session():\n tf_x, _ = self._input(shape)\n indices = [0, 1, 2, 0]\n s = math_ops.segment_sum(data=tf_x, segment_ids=indices)\n with self.assertRaisesOpError(\n r\"Segment id 1 out of range \\[0, 1\\), possibly \"\n \"because 'segment_ids' input is not sorted.\"):\n self.evaluate(s)\n\n @test_util.run_deprecated_v1\n def testSegmentIdsInvalid4(self):\n shape = [4, 4]\n for use_gpu in [True, False]:\n with self.cached_session(use_gpu=use_gpu):\n tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)\n indices = [0, 0, 0, -1]\n s = 
math_ops.segment_sum(data=tf_x, segment_ids=indices)\n with self.assertRaisesOpError(\"segment ids must be >= 0\"):\n self.evaluate(s)\n\n @test_util.run_deprecated_v1\n def testSegmentIdsInvalid5(self):\n shape = [4, 4]\n for use_gpu in [True, False]:\n with self.cached_session(use_gpu=use_gpu):\n tf_x, _ = self._input(shape, dtype=dtypes_lib.float32)\n indices = [0, 0, 0, -2]\n s = math_ops.segment_sum(data=tf_x, segment_ids=indices)\n with self.assertRaisesOpError(\"segment ids must be >= 0\"):\n self.evaluate(s)\n\n @test_util.run_deprecated_v1\n def testGradient(self):\n shape = [4, 4]\n indices = [0, 1, 2, 2]\n for tf_op in [\n math_ops.segment_sum, math_ops.segment_mean, math_ops.segment_min,\n math_ops.segment_max\n ]:\n with self.cached_session():\n tf_x, np_x = self._input(shape, dtype=dtypes_lib.float64)\n s = tf_op(data=tf_x, segment_ids=indices)\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n tf_x,\n shape,\n s, [3, 4],\n x_init_value=np_x.astype(np.double),\n delta=1)\n self.assertAllClose(jacob_t, jacob_n)\n\n\nclass UnsortedSegmentTest(SegmentReductionHelper):\n\n def __init__(self, methodName='runTest'):\n # Each item is np_op1, np_op2, tf_op, initial_value functor\n self.ops_list = [(np.add, None,\n math_ops.unsorted_segment_sum, lambda t: 0),\n (self._mean_cum_op, self._mean_reduce_op,\n math_ops.unsorted_segment_mean, lambda t: 0),\n (self._mean_cum_op, self._sqrt_n_reduce_op,\n math_ops.unsorted_segment_sqrt_n, lambda t: 0),\n (np.ndarray.__mul__, None,\n math_ops.unsorted_segment_prod, lambda t: 1),\n (np.minimum, None,\n math_ops.unsorted_segment_min, lambda t: t.max),\n (np.maximum, None,\n math_ops.unsorted_segment_max, lambda t: t.min)]\n\n # A subset of ops has been enabled for complex numbers\n self.complex_ops_list = [(np.add, None,\n math_ops.unsorted_segment_sum, lambda t: 0),\n (np.ndarray.__mul__, None,\n math_ops.unsorted_segment_prod, lambda t: 1)]\n self.differentiable_dtypes = [dtypes_lib.float16, dtypes_lib.float32,\n 
dtypes_lib.float64]\n self.all_dtypes = (self.differentiable_dtypes +\n [dtypes_lib.bfloat16,\n dtypes_lib.int64, dtypes_lib.int32,\n dtypes_lib.complex64, dtypes_lib.complex128])\n super(UnsortedSegmentTest, self).__init__(methodName=methodName)\n\n def testValues(self):\n indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])\n num_segments = 12\n for indices in indices_flat, indices_flat.reshape(5, 2):\n shape = indices.shape + (2,)\n for dtype in self.all_dtypes:\n ops_list = self.complex_ops_list if dtype.is_complex else self.ops_list\n tf_x, np_x = self._input(shape, dtype=dtype)\n for use_gpu in [True, False]:\n with self.cached_session(use_gpu=True):\n for np_op1, np_op2, tf_op, init_op in ops_list:\n # sqrt_n doesn't support integers\n if (np_op2 == self._sqrt_n_reduce_op and dtype.is_integer):\n continue\n # todo(philjd): enable this test once real_div supports bfloat16\n if (np_op2 in [self._sqrt_n_reduce_op, self._mean_reduce_op] and\n dtype == dtypes_lib.bfloat16):\n continue\n np_ans = self._segmentReduce(\n indices, np_x, np_op1, np_op2, num_segments=num_segments,\n initial_value=init_op(dtype))\n s = tf_op(tf_x, segment_ids=indices, num_segments=num_segments)\n tf_ans = self.evaluate(s)\n if dtype is dtypes_lib.bfloat16:\n tf_ans = tf_ans.astype(np.float32)\n self.assertAllCloseAccordingToType(np_ans, tf_ans)\n self.assertShapeEqual(np_ans, s)\n\n def testNumSegmentsTypes(self):\n dtypes = [dtypes_lib.int32, dtypes_lib.int64]\n indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])\n num_segments = 12\n for indices in indices_flat, indices_flat.reshape(5, 2):\n shape = indices.shape + (2,)\n for dtype in dtypes:\n with self.cached_session(use_gpu=True):\n tf_x, np_x = self._input(shape)\n num_segments_constant = constant_op.constant(\n num_segments, dtype=dtype)\n np_ans = self._segmentReduce(\n indices, np_x, np.add, op2=None, num_segments=num_segments)\n s = math_ops.unsorted_segment_sum(\n data=tf_x,\n segment_ids=indices,\n 
num_segments=num_segments_constant)\n tf_ans = self.evaluate(s)\n self.assertAllClose(np_ans, tf_ans)\n self.assertShapeEqual(np_ans, s)\n\n @test_util.run_deprecated_v1\n def testGradients(self):\n num_cols = 2\n indices_flat = np.array([0, 4, 0, -1, 3, -1, 4, 7, 7, 3])\n num_segments = max(indices_flat) + 3\n for dtype in self.differentiable_dtypes:\n ops_list = self.complex_ops_list if dtype.is_complex else self.ops_list\n for indices in indices_flat, indices_flat.reshape(5, 2):\n shape = indices.shape + (num_cols,)\n # test CPU and GPU as tf.gather behaves differently on each device\n for use_gpu in [False, True]:\n with self.cached_session(use_gpu=use_gpu):\n for _, _, tf_op, _ in ops_list:\n tf_x, np_x = self._input(shape, dtype=dtype)\n s = tf_op(tf_x, indices, num_segments)\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n tf_x,\n shape,\n s, [num_segments, num_cols],\n x_init_value=np_x,\n delta=1)\n self.assertAllClose(jacob_t, jacob_n)\n\n @test_util.run_deprecated_v1\n def testProdGrad(self):\n # additional test for the prod gradient to ensure correct handling of zeros\n values = np.array([0, 0, 1, 0, 2, 2, 3, 3, 3], dtype=np.float32)\n indices = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2], dtype=np.int32)\n indices_neg = np.array([-1, 0, 0, -1, 1, 1, -1, 2, 2], dtype=np.int32)\n values_tf = constant_op.constant(values)\n # ground truth partial derivatives\n gradients_indices = np.zeros((9, 3), dtype=np.float32)\n gradients_indices_neg = np.zeros((9, 3), dtype=np.float32)\n # the derivative w.r.t. 
to the other segments is zero, so here we only\n # explicitly set the grad values for the corresponding segment\n gradients_indices[range(9), indices] = [0, 0, 0, 4, 0, 0, 9, 9, 9]\n gradients_indices_neg[range(9), indices_neg] = [0, 1, 0, 0, 2, 2, 0, 3, 3]\n for use_gpu in [False, True]:\n with self.cached_session(use_gpu=use_gpu):\n for ind, grad_gt in [(indices, gradients_indices),\n (indices_neg, gradients_indices_neg)]:\n s = math_ops.unsorted_segment_prod(values_tf,\n constant_op.constant(ind), 3)\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n values_tf, (9,), s, (3,), x_init_value=values, delta=1)\n self.assertAllClose(jacob_t, jacob_n)\n self.assertAllClose(jacob_t, grad_gt)\n\n @test_util.run_deprecated_v1\n def testGradientMatchesSegmentSum(self):\n # Strategy: compute the gradient for UnsortedSegmentSum and SegmentSum\n # and compare the outputs, which should be identical.\n # NB: for this test to work, indices must be valid for SegmentSum, namely\n # it must be sorted, the indices must be contiguous, and num_segments\n # must be max(indices) + 1.\n indices = [0, 0, 1, 1, 1, 2, 3, 4, 5]\n n = len(indices)\n num_cols = 2\n shape = [n, num_cols]\n num_segments = max(indices) + 1\n for dtype in self.differentiable_dtypes:\n with self.cached_session(use_gpu=True):\n tf_x, np_x = self._input(shape, dtype=dtype)\n # Results from UnsortedSegmentSum\n unsorted_s = math_ops.unsorted_segment_sum(\n data=tf_x, segment_ids=indices, num_segments=num_segments)\n unsorted_jacob_t, unsorted_jacob_n = (\n gradient_checker.compute_gradient(tf_x, shape, unsorted_s,\n [num_segments, num_cols],\n x_init_value=np_x, delta=1))\n\n # Results from SegmentSum\n sorted_s = math_ops.segment_sum(data=tf_x, segment_ids=indices)\n sorted_jacob_t, sorted_jacob_n = gradient_checker.compute_gradient(\n tf_x,\n shape,\n sorted_s, [num_segments, num_cols],\n x_init_value=np_x,\n delta=1)\n self.assertAllClose(unsorted_jacob_t, sorted_jacob_t)\n 
self.assertAllClose(unsorted_jacob_n, sorted_jacob_n)\n\n @test_util.run_deprecated_v1\n def testBadIndices(self):\n # Note: GPU kernel does not return the out-of-range error needed for this\n # test, so this test is marked as cpu-only.\n # Note: With PR #13055 a negative index will be ignored silently.\n with self.session(use_gpu=False):\n for bad in [[2]], [[7]]:\n unsorted = math_ops.unsorted_segment_sum([[17]], bad, num_segments=2)\n with self.assertRaisesOpError(\n r\"segment_ids\\[0,0\\] = %d is out of range \\[0, 2\\)\" % bad[0][0]):\n self.evaluate(unsorted)\n\n @test_util.run_deprecated_v1\n def testEmptySecondDimension(self):\n dtypes = [np.float16, np.float32, np.float64, np.int64, np.int32,\n np.complex64, np.complex128]\n with self.session(use_gpu=True):\n for dtype in dtypes:\n for itype in (np.int32, np.int64):\n data = np.zeros((2, 0), dtype=dtype)\n segment_ids = np.array([0, 1], dtype=itype)\n unsorted = math_ops.unsorted_segment_sum(data, segment_ids, 2)\n self.assertAllEqual(unsorted.eval(), np.zeros((2, 0), dtype=dtype))\n\n def testDropNegatives(self):\n # Note: the test is done by replacing segment_ids with 8 to -1\n # for index and replace values generated by numpy with 0.\n indices_flat = np.array([0, 4, 0, 8, 3, 8, 4, 7, 7, 3])\n num_segments = 12\n for indices in indices_flat, indices_flat.reshape(5, 2):\n shape = indices.shape + (2,)\n for dtype in self.all_dtypes:\n with self.session(use_gpu=True):\n tf_x, np_x = self._input(shape, dtype=dtype)\n np_ans = self._segmentReduce(\n indices, np_x, np.add, op2=None, num_segments=num_segments)\n # Replace np_ans[8] with 0 for the value\n np_ans[8:] = 0\n # Replace 8 with -1 in indices\n np.place(indices, indices == 8, [-1])\n s = math_ops.unsorted_segment_sum(\n data=tf_x, segment_ids=indices, num_segments=num_segments)\n tf_ans = self.evaluate(s)\n self.assertAllClose(np_ans, tf_ans)\n self.assertShapeEqual(np_ans, s)\n\n\nclass SparseSegmentReductionHelper(SegmentReductionHelper):\n\n def 
_sparse_input(self, input_shape, num_indices, dtype=dtypes_lib.int32):\n a, b = super(SparseSegmentReductionHelper, self)._input(input_shape, dtype)\n indices = np.random.randint(0, input_shape[0], num_indices).astype(np.int32)\n return (constant_op.constant(\n indices, dtype=dtypes_lib.int32), indices, a, b)\n\n def _sparseSegmentReduce(self,\n x,\n indices,\n segment_indices,\n op1,\n op2=None,\n num_segments=None):\n return self._segmentReduce(\n segment_indices, x[indices], op1, op2, num_segments=num_segments)\n\n\nclass SparseSegmentReductionOpTest(SparseSegmentReductionHelper):\n\n def testValues(self):\n dtypes = [\n dtypes_lib.float32, dtypes_lib.float64, dtypes_lib.int64,\n dtypes_lib.int32\n ]\n\n mean_dtypes = [dtypes_lib.float32, dtypes_lib.float64]\n\n # Each item is np_op1, np_op2, tf_op\n ops_list = [(np.add, None, math_ops.sparse_segment_sum),\n (self._mean_cum_op, self._mean_reduce_op,\n math_ops.sparse_segment_mean)]\n\n n = 400\n shape = [n, 2]\n segment_indices = []\n for i in range(20):\n for _ in range(i + 1):\n segment_indices.append(i)\n num_indices = len(segment_indices)\n for dtype in dtypes:\n with self.cached_session(use_gpu=False):\n tf_indices, np_indices, tf_x, np_x = self._sparse_input(\n shape, num_indices, dtype=dtype)\n for np_op1, np_op2, tf_op in ops_list:\n if tf_op == math_ops.sparse_segment_mean and dtype not in mean_dtypes:\n continue\n np_ans = self._sparseSegmentReduce(np_x, np_indices, segment_indices,\n np_op1, np_op2)\n s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)\n tf_ans = self.evaluate(s)\n self.assertAllClose(np_ans, tf_ans)\n # NOTE(mrry): The static shape inference that computes\n # `tf_ans.shape` can only infer that sizes from dimension 1\n # onwards, because the size of dimension 0 is data-dependent\n # and may therefore vary dynamically.\n self.assertAllEqual(np_ans.shape[1:], tf_ans.shape[1:])\n\n def testSegmentIdsHole(self):\n tf_x, np_x = self._input([10, 4], 
dtype=dtypes_lib.float32)\n ops_list = [(np.add, None, math_ops.sparse_segment_sum), (\n self._mean_cum_op, self._mean_reduce_op, math_ops.sparse_segment_mean)]\n segment_indices = [0, 2, 2, 2]\n tf_indices = [8, 3, 0, 9]\n with self.session(use_gpu=False):\n for np_op1, np_op2, tf_op in ops_list:\n np_ans = self._sparseSegmentReduce(np_x, tf_indices, segment_indices,\n np_op1, np_op2)\n s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)\n tf_ans = self.evaluate(s)\n self.assertAllClose(np_ans, tf_ans)\n\n def testWithNumSegments(self):\n tf_x, np_x = self._input([10, 4], dtype=dtypes_lib.float32)\n ops_list = [(np.add, None, math_ops.sparse_segment_sum_with_num_segments),\n (self._mean_cum_op, self._mean_reduce_op,\n math_ops.sparse_segment_mean_with_num_segments)]\n segment_indices = [0, 2, 2, 2]\n tf_indices = [8, 3, 0, 9]\n num_segments = 5\n with self.session(use_gpu=False):\n for np_op1, np_op2, tf_op in ops_list:\n np_ans = self._sparseSegmentReduce(\n np_x,\n tf_indices,\n segment_indices,\n np_op1,\n np_op2,\n num_segments=num_segments)\n s = tf_op(\n data=tf_x,\n indices=tf_indices,\n segment_ids=segment_indices,\n num_segments=num_segments)\n tf_ans = self.evaluate(s)\n self.assertAllClose(np_ans, tf_ans)\n\n def testWithEmptySegments(self):\n tf_x = constant_op.constant([], shape=[0, 4], dtype=dtypes_lib.float32)\n ops_list = [\n math_ops.sparse_segment_sum_with_num_segments,\n math_ops.sparse_segment_mean_with_num_segments\n ]\n segment_indices = []\n tf_indices = []\n num_segments = 5\n with self.session(use_gpu=False):\n for tf_op in ops_list:\n s = tf_op(\n data=tf_x,\n indices=tf_indices,\n segment_ids=segment_indices,\n num_segments=num_segments)\n tf_ans = self.evaluate(s)\n self.assertAllClose(np.zeros([5, 4]), tf_ans)\n\n def testSegmentIdsGreaterThanZero(self):\n tf_x, np_x = self._input([10, 4], dtype=dtypes_lib.float32)\n ops_list = [(np.add, None, math_ops.sparse_segment_sum), (\n self._mean_cum_op, self._mean_reduce_op, 
math_ops.sparse_segment_mean)]\n segment_indices = [1, 2, 2, 2]\n tf_indices = [8, 3, 0, 9]\n with self.session(use_gpu=False):\n for np_op1, np_op2, tf_op in ops_list:\n np_ans = self._sparseSegmentReduce(np_x, tf_indices, segment_indices,\n np_op1, np_op2)\n s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)\n tf_ans = self.evaluate(s)\n self.assertAllClose(np_ans, tf_ans)\n\n def testValid(self):\n # Baseline for the test*Invalid* methods below.\n tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)\n ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]\n segment_indices = [0, 1, 2, 2]\n tf_indices = [8, 3, 0, 9]\n with self.session(use_gpu=False):\n for tf_op in ops_list:\n s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)\n self.evaluate(s)\n\n @test_util.run_deprecated_v1\n def testIndicesInvalid1(self):\n tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)\n ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]\n segment_indices = [0, 1, 2, 2]\n tf_indices = [8, -1, 0, 9]\n with self.session(use_gpu=False):\n for tf_op in ops_list:\n s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)\n with self.assertRaisesOpError(\n r\"indices\\[1\\] == -1 out of range \\[0, 10\\)\"):\n self.evaluate(s)\n\n @test_util.run_deprecated_v1\n def testIndicesInvalid2(self):\n tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)\n ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]\n segment_indices = [0, 1, 2, 2]\n tf_indices = [8, 3, 0, 10]\n with self.session(use_gpu=False):\n for tf_op in ops_list:\n s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)\n with self.assertRaisesOpError(\n r\"indices\\[3\\] == 10 out of range \\[0, 10\\)\"):\n self.evaluate(s)\n\n @test_util.run_deprecated_v1\n def testSegmentsInvalid2(self):\n tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)\n ops_list = [math_ops.sparse_segment_sum, 
math_ops.sparse_segment_mean]\n segment_indices = [0, 1, 0, 1]\n tf_indices = [8, 3, 0, 9]\n with self.session(use_gpu=False):\n for tf_op in ops_list:\n s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)\n with self.assertRaisesOpError(\"segment ids are not increasing\"):\n self.evaluate(s)\n\n @test_util.run_deprecated_v1\n def testSegmentsInvalid3(self):\n tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)\n ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]\n segment_indices = [0, 1, 2, 0]\n tf_indices = [8, 3, 0, 9]\n with self.session(use_gpu=False):\n for tf_op in ops_list:\n s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)\n with self.assertRaisesOpError(\n r\"Segment id 1 out of range \\[0, 1\\), possibly because \"\n \"'segment_ids' input is not sorted\"):\n self.evaluate(s)\n\n @test_util.run_deprecated_v1\n def testSegmentsInvalid4(self):\n tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)\n ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]\n segment_indices = [-1, 0, 1, 1]\n tf_indices = [8, 3, 0, 9]\n with self.session(use_gpu=False):\n for tf_op in ops_list:\n s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)\n with self.assertRaisesOpError(\n r\"Segment id -1 out of range \\[0, 2\\), possibly because \"\n \"'segment_ids' input is not sorted\"):\n self.evaluate(s)\n\n @test_util.run_deprecated_v1\n def testSegmentsInvalid6(self):\n tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)\n ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]\n segment_indices = [0, 0, 0, -1]\n tf_indices = [8, 3, 0, 9]\n with self.session(use_gpu=False):\n for tf_op in ops_list:\n s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)\n with self.assertRaisesOpError(\"segment ids must be >= 0\"):\n self.evaluate(s)\n\n @test_util.run_deprecated_v1\n def testSegmentsInvalid7(self):\n tf_x, _ = self._input([10, 4], 
dtype=dtypes_lib.float32)\n ops_list = [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]\n segment_indices = [0, 0, 0, -2]\n tf_indices = [8, 3, 0, 9]\n with self.session(use_gpu=False):\n for tf_op in ops_list:\n s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)\n with self.assertRaisesOpError(\"segment ids must be >= 0\"):\n self.evaluate(s)\n\n def testSegmentWithNumSegmentsValid(self):\n # Baseline for the test*WithNumSegmentsInvalid* methods below.\n tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)\n ops_list = [\n math_ops.sparse_segment_sum_with_num_segments,\n math_ops.sparse_segment_mean_with_num_segments,\n ]\n num_segments = 5\n segment_indices = [0, 1, 3, 3]\n tf_indices = [8, 3, 0, 9]\n with self.session(use_gpu=False):\n for tf_op in ops_list:\n s = tf_op(\n data=tf_x,\n indices=tf_indices,\n segment_ids=segment_indices,\n num_segments=num_segments)\n self.evaluate(s)\n\n @test_util.run_deprecated_v1\n def testSegmentWithNumSegmentsInvalid1(self):\n tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)\n ops_list = [\n math_ops.sparse_segment_sum_with_num_segments,\n math_ops.sparse_segment_mean_with_num_segments,\n ]\n num_segments = 5\n segment_indices = [0, 1, 3, 5]\n tf_indices = [8, 3, 0, 9]\n with self.session(use_gpu=False):\n for tf_op in ops_list:\n s = tf_op(\n data=tf_x,\n indices=tf_indices,\n segment_ids=segment_indices,\n num_segments=num_segments)\n with self.assertRaisesOpError(\"segment ids must be < num_segments\"):\n self.evaluate(s)\n\n @test_util.run_deprecated_v1\n def testSegmentWithNumSegmentsInvalid2(self):\n tf_x, _ = self._input([10, 4], dtype=dtypes_lib.float32)\n ops_list = [\n math_ops.sparse_segment_sum_with_num_segments,\n math_ops.sparse_segment_mean_with_num_segments,\n ]\n num_segments = -2\n segment_indices = [0, 1, 3, 3]\n tf_indices = [8, 3, 0, 9]\n with self.session(use_gpu=False):\n for tf_op in ops_list:\n with self.assertRaisesRegexp(\n ValueError, \"Cannot specify a 
negative value for num_segments\"):\n tf_op(\n data=tf_x,\n indices=tf_indices,\n segment_ids=segment_indices,\n num_segments=num_segments)\n\n @test_util.run_deprecated_v1\n def testGradient(self):\n shape = [10, 4]\n\n segment_indices = [0, 1, 2, 2]\n num_indices = len(segment_indices)\n for tf_op in [math_ops.sparse_segment_sum, math_ops.sparse_segment_mean]:\n with self.cached_session():\n tf_indices, _, tf_x, np_x = self._sparse_input(\n shape, num_indices, dtype=dtypes_lib.float64)\n s = tf_op(data=tf_x, indices=tf_indices, segment_ids=segment_indices)\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n tf_x,\n shape,\n s, [3, 4],\n x_init_value=np_x.astype(np.double),\n delta=1)\n self.assertAllClose(jacob_t, jacob_n)\n\n @test_util.run_deprecated_v1\n def testGradientWithEmptySegmentsAtEnd(self):\n shape = [10, 4]\n\n num_segments = 5\n segment_indices = [0, 1, 2, 2]\n num_indices = len(segment_indices)\n for tf_op in [\n math_ops.sparse_segment_sum_with_num_segments,\n math_ops.sparse_segment_mean_with_num_segments,\n ]:\n with self.cached_session():\n tf_indices, _, tf_x, np_x = self._sparse_input(\n shape, num_indices, dtype=dtypes_lib.float64)\n s = tf_op(\n data=tf_x,\n indices=tf_indices,\n segment_ids=segment_indices,\n num_segments=num_segments)\n jacob_t, jacob_n = gradient_checker.compute_gradient(\n tf_x,\n shape,\n s, [5, 4],\n x_init_value=np_x.astype(np.double),\n delta=1)\n self.assertAllClose(jacob_t, jacob_n)\n\n def testGradientValid(self):\n # Baseline for the testGradient*Invalid* methods below.\n tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)\n ops_list = [\n math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad\n ]\n segment_indices = [0, 1, 2, 2]\n tf_indices = [8, 3, 0, 9]\n with self.session(use_gpu=False):\n for tf_op in ops_list:\n s = tf_op(tf_x, tf_indices, segment_indices, 10)\n self.evaluate(s)\n\n @test_util.run_deprecated_v1\n def testGradientIndicesInvalid1(self):\n tf_x, _ = self._input([3, 
4], dtype=dtypes_lib.float32)\n ops_list = [\n math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad\n ]\n segment_indices = [0, 1, 2, 2]\n tf_indices = [8, 3, 0, 10]\n with self.session(use_gpu=False):\n for tf_op in ops_list:\n s = tf_op(tf_x, tf_indices, segment_indices, 10)\n with self.assertRaisesOpError(r\"Index 10 out of range \\[0, 10\\)\"):\n self.evaluate(s)\n\n @test_util.run_deprecated_v1\n def testGradientIndicesInvalid2(self):\n tf_x, _ = self._input([3, 4], dtype=dtypes_lib.float32)\n ops_list = [\n math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad\n ]\n segment_indices = [0, 1, 2, 2]\n tf_indices = [8, 3, -1, 9]\n with self.session(use_gpu=False):\n for tf_op in ops_list:\n s = tf_op(tf_x, tf_indices, segment_indices, 10)\n with self.assertRaisesOpError(r\"Index -1 out of range \\[0, 10\\)\"):\n self.evaluate(s)\n\n @test_util.run_deprecated_v1\n def testGradientSegmentsInvalid1(self):\n tf_x, _ = self._input(\n [3, 4], dtype=dtypes_lib.float32) # expecting 3 segments\n ops_list = [\n math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad\n ]\n segment_indices = [0, 1, 1, 4] # 5 segments\n tf_indices = [8, 3, 0, 9]\n with self.session(use_gpu=False):\n for tf_op in ops_list:\n s = tf_op(tf_x, tf_indices, segment_indices, 10)\n with self.assertRaisesOpError(\"Invalid number of segments\"):\n self.evaluate(s)\n\n @test_util.run_deprecated_v1\n def testGradientSegmentsInvalid2(self):\n tf_x, _ = self._input([1, 4], dtype=dtypes_lib.float32)\n ops_list = [\n math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad\n ]\n segment_indices = [0, 1, 2, 0]\n tf_indices = [8, 3, 0, 9]\n with self.session(use_gpu=False):\n for tf_op in ops_list:\n s = tf_op(tf_x, tf_indices, segment_indices, 10)\n with self.assertRaisesOpError(r\"Segment id 1 out of range \\[0, 1\\)\"):\n self.evaluate(s)\n\n @test_util.run_deprecated_v1\n def testGradientSegmentsInvalid3(self):\n tf_x, _ = self._input([2, 4], 
dtype=dtypes_lib.float32)\n ops_list = [\n math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad\n ]\n segment_indices = [-1, 0, 1, 1]\n tf_indices = [8, 3, 0, 9]\n with self.session(use_gpu=False):\n for tf_op in ops_list:\n s = tf_op(tf_x, tf_indices, segment_indices, 10)\n with self.assertRaisesOpError(r\"Segment id -1 out of range \\[0, 2\\)\"):\n self.evaluate(s)\n\n @test_util.run_deprecated_v1\n def testGradientSegmentsInvalid4(self):\n tf_x, _ = self._input([0, 4], dtype=dtypes_lib.float32)\n ops_list = [\n math_ops.sparse_segment_mean_grad, math_ops.sparse_segment_sqrt_n_grad\n ]\n segment_indices = [0, 1, 2, -1]\n tf_indices = [8, 3, 0, 9]\n with self.session(use_gpu=False):\n for tf_op in ops_list:\n s = tf_op(tf_x, tf_indices, segment_indices, 10)\n with self.assertRaisesOpError(r\"Segment id 0 out of range \\[0, 0\\)\"):\n self.evaluate(s)\n\n\nclass SegmentReductionOpBenchmark(test.Benchmark):\n outer_dim_options = [2**x for x in range(9, 14, 2)]\n ratio_options = [2**x for x in range(1, 6, 2)]\n inner_dim_options = [2**x for x in range(9, 14, 2)]\n # randomly generated sizes with less alignments\n inner_dim_options += [\n 1120, 1215, 1856, 1302, 1329, 1531, 1313, 1672, 1851, 1584\n ]\n dtype_options = [np.float32, np.float64]\n options = (outer_dim_options, ratio_options, inner_dim_options, dtype_options)\n # pylint: disable=g-long-lambda\n op_functors = [lambda vc, vs, seg_ids:\n (\"sorted\", math_ops.segment_sum(vc, vs)),\n lambda vc, vs, seg_ids:\n (\"unsorted\",\n math_ops.unsorted_segment_sum(vc, vs, seg_ids[-1]+1))]\n # pylint: enable=g-long-lambda\n repeat = 10\n\n def _npTypeToStr(self, t):\n if t == np.float32:\n return \"fp32\"\n if t == np.float64:\n return \"fp64\"\n\n def _runGraph(self, op_functor, outer_dim, ratio, inner_dim, dtype):\n output_outer_dim = int(outer_dim / ratio)\n const = np.random.randint(5, size=(outer_dim, inner_dim))\n seg_ids = np.sort(np.random.randint(output_outer_dim, size=outer_dim))\n vs = 
variables.Variable(seg_ids.astype(np.int32))\n with ops.device(test_util.gpu_device_name()):\n vc = variables.Variable(const.astype(dtype))\n name, op = op_functor(vc, vs, seg_ids)\n with session.Session() as sess:\n variables.global_variables_initializer().run()\n r = self.run_op_benchmark(\n sess,\n op,\n min_iters=self.repeat,\n name=\"_\".join(\n map(str,\n [name, outer_dim, ratio, inner_dim,\n self._npTypeToStr(dtype)])))\n return name, r[\"wall_time\"]\n\n def benchmarkSegmentSumGPU(self):\n if not test.is_gpu_available(cuda_only=True):\n return\n for outer_dim, ratio, inner_dim, dtype in itertools.product(*self.options):\n op_functor = self.op_functors[0]\n with ops.Graph().as_default():\n self._runGraph(op_functor, outer_dim, ratio, inner_dim, dtype)\n\n def benchmarkUnsortedSegmentSumGPU(self):\n if not test.is_gpu_available(cuda_only=True):\n return\n for outer_dim, ratio, inner_dim, dtype in itertools.product(*self.options):\n op_functor = self.op_functors[1]\n with ops.Graph().as_default():\n self._runGraph(op_functor, outer_dim, ratio, inner_dim, dtype)\n\n\nif __name__ == \"__main__\":\n test.main()\n"
] |
[
[
"tensorflow.python.ops.gradient_checker.compute_gradient",
"numpy.sqrt",
"numpy.asarray",
"numpy.arange",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.platform.test.is_gpu_available",
"tensorflow.python.ops.math_ops.segment_sum",
"numpy.ones",
"tensorflow.python.client.session.Session",
"tensorflow.python.platform.test.main",
"numpy.random.randint",
"tensorflow.python.ops.math_ops.unsorted_segment_sum",
"tensorflow.python.framework.test_util.gpu_device_name",
"tensorflow.python.ops.variables.global_variables_initializer",
"numpy.array",
"numpy.zeros",
"numpy.place",
"tensorflow.python.framework.constant_op.constant"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"1.4",
"2.7",
"2.2",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"2.6",
"1.2",
"2.10"
]
}
] |
quantumiracle/Reinforcement-Learning-for-Robotics
|
[
"527befb0545d90fc278a820d85af77336b0a9763",
"527befb0545d90fc278a820d85af77336b0a9763"
] |
[
"DDPG4Reacher2/env_test.py",
"DDPGfD/env_2obstacle.py"
] |
[
"import pygame\nimport numpy as np\nimport math\nimport time\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import BoundaryNorm\nfrom matplotlib.ticker import MaxNLocator\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom matplotlib.ticker import LinearLocator, FormatStrFormatter\n\n\nclass Reacher:\n def __init__(self, screen_size, link_lengths, joint_angles):\n # Global variables\n self.screen_size = screen_size\n self.link_lengths = link_lengths\n self.joint_angles = joint_angles\n self.num_actions=5 # equals to number of joints - 1\n self.L = 8 # distance from target to get reward 2\n\n # The main entry point\n self.screen = pygame.display.set_mode((self.screen_size, self.screen_size))\n pygame.display.set_caption(\"Reacher\")\n self.is_running = 1\n self.target_pos1=[self.screen_size*7/8, self.screen_size/4]\n self.target_pos2=[self.screen_size*7/8, 3*self.screen_size/8]\n self.penalty_pos1=[self.screen_size*6/8, 4*self.screen_size/16]\n self.penalty_pos2=[self.screen_size*6/8, 5*self.screen_size/16]\n self.penalty_pos3=[self.screen_size*6/8, 6*self.screen_size/16]\n self.penalty_pos4=[self.screen_size*6/8, 7*self.screen_size/16]\n self.half_target_pos1=[self.screen_size*2/8, 4*self.screen_size/16]\n self.half_target_pos2=[self.screen_size*2/8, 5*self.screen_size/16]\n self.half_target_pos3=[self.screen_size*2/8, 6*self.screen_size/16]\n self.half_target_pos4=[self.screen_size*2/8, 7*self.screen_size/16]\n\n # Function to compute the transformation matrix between two frames\n def compute_trans_mat(self, angle, length):\n cos_theta = math.cos(math.radians(angle))\n sin_theta = math.sin(math.radians(angle))\n dx = -length * sin_theta\n dy = length * cos_theta\n T = np.array([[cos_theta, -sin_theta, dx], [sin_theta, cos_theta, dy], [0, 0, 1]])\n return T\n\n\n # Function to draw the current state of the world\n def draw_current_state(self, ):\n # First link in world coordinates\n T_01 = self.compute_trans_mat(self.joint_angles[0], 
self.link_lengths[0])\n origin_1 = np.dot(T_01, np.array([0, 0, 1]))\n p0 = [0, 0]\n p1 = [origin_1[0], -origin_1[1]] # the - is because the y-axis is opposite in world and image coordinates\n # Second link in world coordinates\n T_12 = self.compute_trans_mat(self.joint_angles[1], self.link_lengths[1])\n origin_2 = np.dot(T_01, np.dot(T_12, np.array([0, 0, 1])))\n p2 = [origin_2[0], -origin_2[1]] # the - is because the y-axis is opposite in world and image coordinates\n # Third link in world coordinates\n T_23 = self.compute_trans_mat(self.joint_angles[2], self.link_lengths[2])\n origin_3 = np.dot(T_01, np.dot(T_12, np.dot(T_23, np.array([0, 0, 1]))))\n p3 = [origin_3[0], -origin_3[1]] # the - is because the y-axis is opposite in world and image coordinates\n \n T_34 = self.compute_trans_mat(self.joint_angles[3], self.link_lengths[3])\n origin_4 = np.dot(T_01, np.dot(T_12, np.dot(T_23, np.dot(T_34, np.array([0, 0, 1])))))\n p4 = [origin_4[0], -origin_4[1]]\n\n T_45 = self.compute_trans_mat(self.joint_angles[4], self.link_lengths[4])\n origin_5 = np.dot(T_01, np.dot(T_12, np.dot(T_23, np.dot(T_34, np.dot(T_45, np.array([0, 0, 1]))))))\n p5 = [origin_5[0], -origin_5[1]]\n \n # Compute the screen coordinates\n p0_u = int(0.5 * self.screen_size + p0[0])\n p0_v = int(0.5 * self.screen_size + p0[1])\n p1_u = int(0.5 * self.screen_size + p1[0])\n p1_v = int(0.5 * self.screen_size + p1[1])\n p2_u = int(0.5 * self.screen_size + p2[0])\n p2_v = int(0.5 * self.screen_size + p2[1])\n p3_u = int(0.5 * self.screen_size + p3[0])\n p3_v = int(0.5 * self.screen_size + p3[1])\n p4_u = int(0.5 * self.screen_size + p4[0])\n p4_v = int(0.5 * self.screen_size + p4[1])\n p5_u = int(0.5 * self.screen_size + p5[0])\n p5_v = int(0.5 * self.screen_size + p5[1])\n # Draw\n self.screen.fill((0, 0, 0))\n pygame.draw.line(self.screen, (255, 255, 255), [p0_u, p0_v], [p1_u, p1_v], 5)\n pygame.draw.line(self.screen, (255, 255, 255), [p1_u, p1_v], [p2_u, p2_v], 5)\n pygame.draw.line(self.screen, 
(255, 255, 255), [p2_u, p2_v], [p3_u, p3_v], 5)\n pygame.draw.line(self.screen, (255, 255, 255), [p3_u, p3_v], [p4_u, p4_v], 5)\n pygame.draw.line(self.screen, (255, 255, 255), [p4_u, p4_v], [p5_u, p5_v], 5)\n\n pygame.draw.circle(self.screen, (0, 255, 0), [p0_u, p0_v], 10)\n pygame.draw.circle(self.screen, (0, 0, 255), [p1_u, p1_v], 10)\n pygame.draw.circle(self.screen, (0, 0, 255), [p2_u, p2_v], 10)\n pygame.draw.circle(self.screen, (255, 0, 0), [p3_u, p3_v], 10)\n pygame.draw.circle(self.screen, (255, 125, 0), [p4_u, p4_v], 10)\n pygame.draw.circle(self.screen, (255, 0, 125), [p5_u, p5_v], 10)\n \n pygame.draw.circle(self.screen, (255, 205, 0), np.array(self.target_pos1).astype(int), 10)\n pygame.draw.circle(self.screen, (255, 255, 0), np.array(self.target_pos2).astype(int), 10)\n\n pygame.draw.circle(self.screen, (100, 100, 0), np.array(self.half_target_pos1).astype(int), 10)\n pygame.draw.circle(self.screen, (100, 100, 0), np.array(self.half_target_pos2).astype(int), 10)\n pygame.draw.circle(self.screen, (100, 100, 0), np.array(self.half_target_pos3).astype(int), 10)\n pygame.draw.circle(self.screen, (100, 100, 0), np.array(self.half_target_pos4).astype(int), 10)\n\n\n pygame.draw.circle(self.screen, (255, 125, 0), np.array(self.penalty_pos1).astype(int), 10)\n pygame.draw.circle(self.screen, (255, 125, 0), np.array(self.penalty_pos2).astype(int), 10)\n pygame.draw.circle(self.screen, (255, 125, 0), np.array(self.penalty_pos3).astype(int), 10)\n pygame.draw.circle(self.screen, (255, 125, 0), np.array(self.penalty_pos4).astype(int), 10)\n\n # Flip the display buffers to show the current rendering\n pygame.display.flip()\n return [p0_u,p0_v,p1_u,p1_v,p2_u,p2_v,p3_u,p3_v,p4_u,p4_v,p5_u,p5_v]\n \n def reset(self,):\n self.joint_angles = [0, 0, 0, 0,0,0]\n self.screen = pygame.display.set_mode((self.screen_size, self.screen_size))\n pygame.display.set_caption(\"Reacher\")\n self.is_running = 1\n pos_set=self.draw_current_state()\n return 
np.array([np.concatenate((pos_set,self.link_lengths))])\n\n def step(self,action): \n # Get events and check if the user has closed the window\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.is_running = 0\n break\n # Change the joint angles (the increment is in degrees)\n change=np.random.uniform(-1,1,size=3)\n # self.joint_angles[0] += 0.1\n # self.joint_angles[1] += 0.2\n # self.joint_angles[2] += 0.3\n # self.joint_angles[0] += change[0]\n # self.joint_angles[1] += change[1]\n # self.joint_angles[2] += change[2]\n # print(action)\n self.joint_angles[0] += action[0][0]\n self.joint_angles[1] += action[0][1]\n self.joint_angles[2] += action[0][2]\n self.joint_angles[3] += action[0][3]\n self.joint_angles[4] += action[0][4]\n # Draw the robot in its new state\n pos_set=self.draw_current_state()\n # if abs(pos_set[6]-self.target_pos[0])<self.L and abs(pos_set[7]-self.target_pos[1])<self.L:\n # reward = 2\n # else:\n # reward = 0\n\n # reward_0=1000\n # reward = reward_0 * np.exp(-np.sqrt(abs(pos_set[6]-self.target_pos[0])**2+abs(pos_set[7]-self.target_pos[1])**2))\n # print(reward) #e-100\n\n reward=self.compute_reward(pos_set[10],pos_set[11])\n # time.sleep(0.5)\n\n return np.array([np.concatenate((pos_set,self.link_lengths))]), np.array([reward]), np.array([False])\n\n def compute_reward(self,pos_x, pos_y):\n reward_0=10.0\n reward_1=100.0\n reward_2=5.0\n reward = reward_0 / (np.sqrt(abs(pos_x-self.target_pos1[0])**2+abs(pos_y-self.target_pos1[1])**2)+1)\n '''fixed distance area reward, hard to converge for gradient based methods!'''\n if np.sqrt(abs(pos_x-self.target_pos2[0])**2+abs(pos_y-self.target_pos2[1])**2) < 10:\n reward+=reward_1\n # if np.sqrt(abs(pos_x-self.half_target_pos1[0])**2+abs(pos_y-self.half_target_pos1[1])**2)<10:\n # reward+=reward_2\n # if np.sqrt(abs(pos_x-self.half_target_pos2[0])**2+abs(pos_y-self.half_target_pos2[1])**2)<10:\n # reward+=reward_2\n # if 
np.sqrt(abs(pos_x-self.half_target_pos3[0])**2+abs(pos_y-self.half_target_pos3[1])**2)<10:\n # reward+=reward_2\n # if np.sqrt(abs(pos_x-self.half_target_pos4[0])**2+abs(pos_y-self.half_target_pos4[1])**2)<10:\n # reward+=reward_2\n\n '''continuous reward, easy for gradient based method to converge!'''\n # reward = reward + reward_1/(np.sqrt(abs(pos_x-self.target_pos2[0])**2+abs(pos_y-self.target_pos2[1])**2)+1)\n reward = reward + reward_2/(np.sqrt(abs(pos_x-self.half_target_pos1[0])**2+abs(pos_y-self.half_target_pos1[1])**2)+1)\n reward = reward + reward_2/(np.sqrt(abs(pos_x-self.half_target_pos2[0])**2+abs(pos_y-self.half_target_pos2[1])**2)+1)\n reward = reward + reward_2/(np.sqrt(abs(pos_x-self.half_target_pos3[0])**2+abs(pos_y-self.half_target_pos3[1])**2)+1)\n reward = reward + reward_2/(np.sqrt(abs(pos_x-self.half_target_pos4[0])**2+abs(pos_y-self.half_target_pos4[1])**2)+1)\n\n reward = reward - reward_0 / (np.sqrt(abs(pos_x-self.penalty_pos1[0])**2+abs(pos_y-self.penalty_pos1[1])**2)+1)\n reward = reward - reward_0 / (np.sqrt(abs(pos_x-self.penalty_pos2[0])**2+abs(pos_y-self.penalty_pos2[1])**2)+1)\n reward = reward - reward_0 / (np.sqrt(abs(pos_x-self.penalty_pos3[0])**2+abs(pos_y-self.penalty_pos3[1])**2)+1)\n reward = reward - reward_0 / (np.sqrt(abs(pos_x-self.penalty_pos4[0])**2+abs(pos_y-self.penalty_pos4[1])**2)+1)\n\n # ratio=0.01\n # reward = reward_0 / (((abs(pos_x-self.target_pos[0])**2+abs(pos_y-self.target_pos[1])**2)+1)**ratio)\n # reward = reward - reward_0 / (((abs(pos_x-self.penalty_pos1[0])**2+abs(pos_y-self.penalty_pos1[1])**2)+1)**ratio)\n # reward = reward - reward_0 / (((abs(pos_x-self.penalty_pos2[0])**2+abs(pos_y-self.penalty_pos2[1])**2)+1)**ratio)\n\n # reward = reward / 100.0\n return reward\n \n def visualize_reward(self, ):\n delta1=1\n # x = np.arange(self.screen_size/2, self.screen_size, delta1)\n # y = np.arange(self.screen_size/16, self.screen_size*3/8, delta1)\n x = np.arange(0, self.screen_size, delta1)\n y = 
np.arange(0, self.screen_size, delta1)\n X, Y = np.meshgrid(x, y)\n # dx=0.5\n # dy=0.5\n # Y, X = np.mgrid[slice(1, self.screen_size, dy),\n # slice(1, self.screen_size, dx)]\n Z=self.compute_reward(X,Y)\n # Plot the surface.\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n surf = ax.plot_surface(X, Y, Z, cmap=cm.coolwarm,\n linewidth=0, antialiased=False)\n fig.colorbar(surf, shrink=0.5, aspect=5)\n\n # levels = MaxNLocator(nbins=20).tick_values(Z.min(), Z.max())\n # cmap = plt.get_cmap('PiYG')\n # plt.figure()\n # CS = plt.contourf(X, Y, Z,cmap=cmap, levels=levels)\n fig.gca().invert_yaxis()\n # plt.colorbar(CS)\n # plt.clabel(CS, inline=1, fontsize=10)\n # plt.title('Reward Map')\n # plt.savefig('map.png')\n plt.show()\n\nif __name__ == \"__main__\":\n screen_size = 1000\n # link_lengths = [200, 140, 100, 80]\n link_lengths = [200, 140, 100,80,60]\n joint_angles = [0, 0, 0, 0,0,0]\n reacher=Reacher(screen_size, link_lengths, joint_angles)\n # reacher.reset()\n num_steps=50\n # Loop until the window is closed\n step=0\n reacher.visualize_reward()\n while reacher.is_running:\n action=np.random.rand(1,5)\n # print(action[0][4])\n print(step)\n step+=1\n reacher.step(action)\n if step >= num_steps:\n reacher.is_running=0\n # for step in range (num_steps):\n # print(step)\n # if reacher.is_running:\n # reacher.step()\n \n\n reacher.reset()\n # print(reacher.is_running)\n step=0\n while reacher.is_running:\n action=np.random.rand(1,5)\n print(step)\n step+=1\n pos=reacher.step(action)\n print(pos,len(pos))\n if step >= num_steps:\n reacher.is_running=0\n\n\n \n",
"'''\nenv with an obstacle area with negative reward value\n'''\n\nimport pygame\nimport numpy as np\nimport math\nimport time\n\n\n\n\nclass Reacher:\n def __init__(self, screen_size, link_lengths, joint_angles):\n # Global variables\n self.screen_size = screen_size\n self.link_lengths = link_lengths\n self.joint_angles = joint_angles\n self.num_actions=3 # equals to number of joints - 1\n self.L = 8 # distance from target to get reward 2\n\n # The main entry point\n self.screen = pygame.display.set_mode((self.screen_size, self.screen_size))\n pygame.display.set_caption(\"Reacher\")\n self.is_running = 1\n self.ini_pos=[480, 60]\n self.target_pos=[self.screen_size/4, self.screen_size*3/4]\n \n self.OBSTACLE_RADIUS = 50\n self.OBSTACLE_PANELTY = -5\n self.OBSTACLE_DISTANCE = 180\n self.NUM_OBSTACLES = 2\n self.obstacle1_pos=0.5 * ( np.array(self.ini_pos)+np.array(self.target_pos))\n self.obstacle2_pos=0.5 * ( np.array(self.ini_pos)+np.array(self.target_pos)) - np.array([self.OBSTACLE_DISTANCE,0])\n print(self.obstacle1_pos, self.obstacle2_pos)\n\n # Function to compute the transformation matrix between two frames\n def compute_trans_mat(self, angle, length):\n cos_theta = math.cos(math.radians(angle))\n sin_theta = math.sin(math.radians(angle))\n dx = -length * sin_theta\n dy = length * cos_theta\n T = np.array([[cos_theta, -sin_theta, dx], [sin_theta, cos_theta, dy], [0, 0, 1]])\n return T\n\n\n # Function to draw the current state of the world\n def draw_current_state(self, ):\n # First link in world coordinates\n T_01 = self.compute_trans_mat(self.joint_angles[0], self.link_lengths[0])\n origin_1 = np.dot(T_01, np.array([0, 0, 1]))\n p0 = [0, 0]\n p1 = [origin_1[0], -origin_1[1]] # the - is because the y-axis is opposite in world and image coordinates\n # Second link in world coordinates\n T_12 = self.compute_trans_mat(self.joint_angles[1], self.link_lengths[1])\n origin_2 = np.dot(T_01, np.dot(T_12, np.array([0, 0, 1])))\n p2 = [origin_2[0], -origin_2[1]] # the 
- is because the y-axis is opposite in world and image coordinates\n # Third link in world coordinates\n T_23 = self.compute_trans_mat(self.joint_angles[2], self.link_lengths[2])\n origin_3 = np.dot(T_01, np.dot(T_12, np.dot(T_23, np.array([0, 0, 1]))))\n p3 = [origin_3[0], -origin_3[1]] # the - is because the y-axis is opposite in world and image coordinates\n # Compute the screen coordinates\n p0_u = int(0.5 * self.screen_size + p0[0])\n p0_v = int(0.5 * self.screen_size + p0[1])\n p1_u = int(0.5 * self.screen_size + p1[0])\n p1_v = int(0.5 * self.screen_size + p1[1])\n p2_u = int(0.5 * self.screen_size + p2[0])\n p2_v = int(0.5 * self.screen_size + p2[1])\n p3_u = int(0.5 * self.screen_size + p3[0])\n p3_v = int(0.5 * self.screen_size + p3[1])\n # Draw\n self.screen.fill((0, 0, 0))\n pygame.draw.line(self.screen, (255, 255, 255), [p0_u, p0_v], [p1_u, p1_v], 5)\n pygame.draw.line(self.screen, (255, 255, 255), [p1_u, p1_v], [p2_u, p2_v], 5)\n pygame.draw.line(self.screen, (255, 255, 255), [p2_u, p2_v], [p3_u, p3_v], 5)\n pygame.draw.circle(self.screen, (0, 255, 0), [p0_u, p0_v], 10)\n pygame.draw.circle(self.screen, (0, 0, 255), [p1_u, p1_v], 10)\n pygame.draw.circle(self.screen, (0, 0, 255), [p2_u, p2_v], 10)\n pygame.draw.circle(self.screen, (255, 0, 0), [p3_u, p3_v], 10)\n \n pygame.draw.circle(self.screen, (255, 255, 0), np.array(self.target_pos).astype(int), 10)\n pygame.draw.circle(self.screen, (125, 125, 0), np.array(self.obstacle1_pos).astype(int), self.OBSTACLE_RADIUS)\n pygame.draw.circle(self.screen, (125, 125, 0), np.array(self.obstacle2_pos).astype(int), self.OBSTACLE_RADIUS)\n # Flip the display buffers to show the current rendering\n pygame.display.flip()\n return [p0_u,p0_v,p1_u,p1_v,p2_u,p2_v,p3_u,p3_v]\n \n def reset(self,):\n self.joint_angles = [0.1, 0.1, 0.1]\n self.screen = pygame.display.set_mode((self.screen_size, self.screen_size))\n pygame.display.set_caption(\"Reacher\")\n self.is_running = 1\n pos_set=self.draw_current_state()\n return 
np.array([pos_set])\n\n def step(self,action): \n # Get events and check if the user has closed the window\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n self.is_running = 0\n break\n # Change the joint angles (the increment is in degrees)\n change=np.random.uniform(-1,1,size=3)\n # self.joint_angles[0] += 0.1\n # self.joint_angles[1] += 0.2\n # self.joint_angles[2] += 0.3\n # self.joint_angles[0] += change[0]\n # self.joint_angles[1] += change[1]\n # self.joint_angles[2] += change[2]\n # print(action)\n self.joint_angles[0] += action[0][0]\n self.joint_angles[1] += action[0][1]\n self.joint_angles[2] += action[0][2]\n # Draw the robot in its new state\n pos_set=self.draw_current_state()\n # if abs(pos_set[6]-self.target_pos[0])<self.L and abs(pos_set[7]-self.target_pos[1])<self.L:\n # reward = 2\n # else:\n # reward = 0\n\n # reward_0=1000\n # reward = reward_0 * np.exp(-np.sqrt(abs(pos_set[6]-self.target_pos[0])**2+abs(pos_set[7]-self.target_pos[1])**2))\n # print(reward) #e-100\n\n reward_0=100.0\n reward = reward_0 / (np.sqrt((pos_set[6]-self.target_pos[0])**2+(pos_set[7]-self.target_pos[1])**2)+1)\n if np.sqrt((pos_set[6]-self.obstacle1_pos[0])**2+(pos_set[7]-self.obstacle1_pos[1])**2) < self.OBSTACLE_RADIUS:\n reward += self.OBSTACLE_PANELTY\n if np.sqrt((pos_set[6]-self.obstacle2_pos[0])**2+(pos_set[7]-self.obstacle2_pos[1])**2) < self.OBSTACLE_RADIUS:\n reward += self.OBSTACLE_PANELTY\n\n # time.sleep(0.5)\n # 8 dim return\n return np.array([pos_set]), np.array([reward]), np.array([False])\n\n\nif __name__ == \"__main__\":\n screen_size = 1000\n # link_lengths = [200, 140, 100, 80]\n link_lengths = [200, 140, 100]\n joint_angles = [0, 0, 0, 0]\n reacher=Reacher(screen_size, link_lengths, joint_angles)\n # reacher.reset()\n num_steps=50\n # Loop until the window is closed\n step=0\n while reacher.is_running:\n print(step)\n action=np.random.rand(1,3)\n step+=1\n time.sleep(0.5)\n reacher.step(action)\n if step >= num_steps:\n 
reacher.is_running=0\n # for step in range (num_steps):\n # print(step)\n # if reacher.is_running:\n # reacher.step()\n \n\n reacher.reset()\n\n\n \n"
] |
[
[
"numpy.meshgrid",
"numpy.arange",
"numpy.concatenate",
"numpy.random.rand",
"numpy.random.uniform",
"numpy.array",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
],
[
"numpy.random.uniform",
"numpy.sqrt",
"numpy.array",
"numpy.random.rand"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Alwaysproblem/explore-ipu
|
[
"7f039768d40e1f3163e0941e2f8246f11ab953c2"
] |
[
"tensorflow2/access_outfeed.py"
] |
[
"from threading import Thread\n\nfrom tensorflow.python.ipu import ipu_outfeed_queue\nfrom tensorflow.python.ipu import ipu_strategy\nfrom tensorflow.python.ipu import utils\nfrom tensorflow import keras\nimport tensorflow as tf\n\n# The host side queue\noutfeed_queue = ipu_outfeed_queue.IPUOutfeedQueue(feed_name=\"outfeed\")\n\n\n# A custom training loop\[email protected]\ndef training_step(features, labels, in_model, optimizer):\n with tf.GradientTape() as tape:\n predictions = in_model(features, training=True)\n prediction_loss = keras.losses.sparse_categorical_crossentropy(\n labels, predictions)\n loss = tf.reduce_mean(prediction_loss)\n grads = tape.gradient(loss, in_model.trainable_variables)\n optimizer.apply_gradients(zip(grads, in_model.trainable_variables))\n\n outfeed_queue.enqueue(loss)\n return loss\n\n\n# Configure the IPU devices\nutils.configure_ipu_system(utils.create_ipu_config())\n\n# Execute the graph\nstrategy = ipu_strategy.IPUStrategy()\nwith strategy.scope():\n # Create the dataset for feeding the graphs\n dataset = tf.data.Dataset.from_tensors(tf.constant(1.0, shape=[2, 20]))\n dataset = dataset.repeat()\n # Create the keras model and optimizer\n model = keras.models.Sequential([\n keras.layers.Flatten(),\n keras.layers.Dense(128, activation='relu'),\n keras.layers.Dense(10, activation='softmax')\n ])\n opt = keras.optimizers.SGD(0.01)\n NUM_ITERATIONS = 100\n\n # Function to continuously dequeue the outfeed until n examples are seen\n def dequeue_thread_fn():\n counter = 0\n while counter != NUM_ITERATIONS:\n r = outfeed_queue.dequeue().numpy()\n\n # Check if something has been enqueued\n if r.size:\n # The outfeed may have been enqueued multiple times between dequeues\n for t in r:\n print(\"Step\", counter, \"loss = \", t)\n counter += 1\n\n # Start the dequeuing thread\n dequeue_thread = Thread(target=dequeue_thread_fn)\n\n # Run the custom training loop over the data.\n for i, (x, y) in zip(range(NUM_ITERATIONS), dataset):\n 
strategy.experimental_run_v2(training_step, args=[x, y, model, opt])\n # Start the dequeue_thread once the graph has been compiled\n if i == 0:\n dequeue_thread.start()\n\n # Wait for the dequeuing thread to finish\n dequeue_thread.join()"
] |
[
[
"tensorflow.constant",
"tensorflow.python.ipu.ipu_outfeed_queue.IPUOutfeedQueue",
"tensorflow.reduce_mean",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.losses.sparse_categorical_crossentropy",
"tensorflow.keras.optimizers.SGD",
"tensorflow.python.ipu.ipu_strategy.IPUStrategy",
"tensorflow.python.ipu.utils.create_ipu_config",
"tensorflow.keras.layers.Flatten",
"tensorflow.GradientTape"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
SamuelmsWong/tensor2tensor
|
[
"7172ad8dc5f1d8f8c0e21cbb831ae2657387a2af",
"7172ad8dc5f1d8f8c0e21cbb831ae2657387a2af",
"7172ad8dc5f1d8f8c0e21cbb831ae2657387a2af",
"7172ad8dc5f1d8f8c0e21cbb831ae2657387a2af"
] |
[
"tensor2tensor/data_generators/imdb.py",
"tensor2tensor/data_generators/google_robot_pushing.py",
"tensor2tensor/models/video/tests_utils.py",
"tensor2tensor/data_generators/algorithmic_math_deepmind.py"
] |
[
"# coding=utf-8\n# Copyright 2020 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"IMDB Sentiment Classification Problem.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tarfile\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import text_problems\nfrom tensor2tensor.utils import registry\n\nimport tensorflow.compat.v1 as tf\n\n\[email protected]_problem\nclass SentimentIMDB(text_problems.Text2ClassProblem):\n \"\"\"IMDB sentiment classification.\"\"\"\n URL = \"http://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz\"\n\n @property\n def is_generate_per_split(self):\n return True\n\n @property\n def dataset_splits(self):\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 10,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 1,\n }]\n\n @property\n def approx_vocab_size(self):\n return 2**13 # 8k vocab suffices for this small dataset.\n\n @property\n def num_classes(self):\n return 2\n\n def class_labels(self, data_dir):\n del data_dir\n return [\"neg\", \"pos\"]\n\n def doc_generator(self, imdb_dir, dataset, include_label=False):\n dirs = [(os.path.join(imdb_dir, dataset, \"pos\"), True), (os.path.join(\n imdb_dir, dataset, \"neg\"), False)]\n\n for d, label in dirs:\n for filename in os.listdir(d):\n with 
tf.gfile.Open(os.path.join(d, filename)) as imdb_f:\n doc = imdb_f.read().strip()\n if include_label:\n yield doc, label\n else:\n yield doc\n\n def generate_samples(self, data_dir, tmp_dir, dataset_split):\n \"\"\"Generate examples.\"\"\"\n # Download and extract\n compressed_filename = os.path.basename(self.URL)\n download_path = generator_utils.maybe_download(tmp_dir, compressed_filename,\n self.URL)\n imdb_dir = os.path.join(tmp_dir, \"aclImdb\")\n if not tf.gfile.Exists(imdb_dir):\n with tarfile.open(download_path, \"r:gz\") as tar:\n tar.extractall(tmp_dir)\n\n # Generate examples\n train = dataset_split == problem.DatasetSplit.TRAIN\n dataset = \"train\" if train else \"test\"\n for doc, label in self.doc_generator(imdb_dir, dataset, include_label=True):\n yield {\n \"inputs\": doc,\n \"label\": int(label),\n }\n\n\[email protected]_problem\nclass SentimentIMDBCharacters(SentimentIMDB):\n \"\"\"IMDB sentiment classification, character level.\"\"\"\n\n @property\n def vocab_type(self):\n return text_problems.VocabType.CHARACTER\n\n def global_task_id(self):\n return problem.TaskID.EN_CHR_SENT\n",
"# coding=utf-8\n# Copyright 2020 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Google robot pushing dataset.\n\nUnsupervised Learning for Physical Interaction through Video Prediction\nChelsea Finn, Ian Goodfellow, Sergey Levine\nhttps://arxiv.org/abs/1605.07157\n\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport io\nimport os\nimport numpy as np\n\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import video_utils\nfrom tensor2tensor.layers import modalities\nfrom tensor2tensor.utils import registry\n\nimport tensorflow.compat.v1 as tf\n\nBASE_URL = \"https://storage.googleapis.com/brain-robotics-data/push/\"\nDATA_TRAIN = (264, \"push_train/push_train.tfrecord-{:05d}-of-00264\")\nDATA_TEST_SEEN = (5, \"/push_testseen/push_testseen.tfrecord-{:05d}-of-00005\")\nDATA_TEST_NOVEL = (5, \"/push_testnovel/push_testnovel.tfrecord-{:05d}-of-00005\")\n\n\n# Lazy load PIL.Image\ndef PIL_Image(): # pylint: disable=invalid-name\n from PIL import Image # pylint: disable=g-import-not-at-top\n return Image\n\n\[email protected]_problem\nclass VideoGoogleRobotPushing(video_utils.VideoProblem):\n \"\"\"Google robot pushing dataset.\"\"\"\n\n @property\n def num_channels(self):\n return 3\n\n @property\n def frame_height(self):\n return 64\n\n @property\n def 
frame_width(self):\n return 64\n\n @property\n def total_number_of_frames(self):\n # TODO(mbz): correct this number to be the real total number of frames.\n return 50 * 10 * 1000\n\n @property\n def max_number_of_frames_per_video(self):\n return 60\n\n @property\n def is_generate_per_split(self):\n return True\n\n def parse_frames(self, filename):\n image_key = \"move/{}/image/encoded\"\n action_key = \"move/{}/commanded_pose/vec_pitch_yaw\"\n state_key = \"move/{}/endeffector/vec_pitch_yaw\"\n\n for serialized_example in tf.python_io.tf_record_iterator(filename):\n x = tf.train.Example()\n x.ParseFromString(serialized_example)\n # there are 6 features per frame\n nf = len(x.features.feature.keys()) // 6\n # it seems features after 60 don't have any image\n nf = min(nf, self.max_number_of_frames_per_video)\n\n for i in range(nf):\n image_name = image_key.format(i)\n action_name = action_key.format(i)\n state_name = state_key.format(i)\n\n byte_str = x.features.feature[image_name].bytes_list.value[0]\n img = PIL_Image().open(io.BytesIO(byte_str))\n # The original images are much bigger than 64x64\n img = img.resize((self.frame_width, self.frame_height),\n resample=PIL_Image().BILINEAR)\n arr = np.array(img.getdata())\n frame = arr.reshape(\n self.frame_width, self.frame_height, self.num_channels)\n\n state = x.features.feature[state_name].float_list.value\n action = x.features.feature[action_name].float_list.value\n\n yield i, frame, state, action\n\n def get_urls(self, count, url_part):\n template = os.path.join(BASE_URL, url_part)\n return [template.format(i) for i in range(count)]\n\n def generate_samples(self, data_dir, tmp_dir, dataset_split):\n if dataset_split == problem.DatasetSplit.TRAIN:\n urls = self.get_urls(DATA_TRAIN[0], DATA_TRAIN[1])\n else:\n urls = self.get_urls(DATA_TEST_SEEN[0], DATA_TEST_SEEN[1])\n urls += self.get_urls(DATA_TEST_NOVEL[0], DATA_TEST_NOVEL[1])\n\n for url in urls:\n path = generator_utils.maybe_download(tmp_dir, 
os.path.basename(url), url)\n for frame_number, frame, state, action in self.parse_frames(path):\n yield {\n \"frame_number\": [frame_number],\n \"frame\": frame,\n \"state\": state,\n \"action\": action,\n }\n\n def hparams(self, defaults, unused_model_hparams):\n p = defaults\n p.modality = {\"inputs\": modalities.ModalityType.VIDEO,\n \"targets\": modalities.ModalityType.VIDEO}\n p.vocab_size = {\"inputs\": 256,\n \"targets\": 256}\n",
"# coding=utf-8\n# Copyright 2020 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Utilties for testing video models.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport numpy as np\n\nfrom tensor2tensor.data_generators import video_generated # pylint: disable=unused-import\n\nfrom tensor2tensor.layers import modalities\nfrom tensor2tensor.utils import registry\n\nimport tensorflow.compat.v1 as tf\n\n\ndef fill_hparams(hparams, in_frames, out_frames):\n hparams.video_num_input_frames = in_frames\n hparams.video_num_target_frames = out_frames\n problem = registry.problem(\"video_stochastic_shapes10k\")\n p_hparams = problem.get_hparams(hparams)\n hparams.problem = problem\n hparams.problem_hparams = p_hparams\n hparams.tiny_mode = True\n hparams.reward_prediction = False\n return hparams\n\n\ndef action_modalities(hparams):\n \"\"\"Modalities with actions.\"\"\"\n hparams.problem_hparams.modality = {\n \"inputs\": modalities.ModalityType.VIDEO_L2_RAW,\n \"input_action\": modalities.ModalityType.SYMBOL,\n \"targets\": modalities.ModalityType.VIDEO_L2_RAW,\n \"target_action\": modalities.ModalityType.SYMBOL,\n }\n hparams.problem_hparams.vocab_size = {\n \"inputs\": 256,\n \"input_action\": 5,\n \"targets\": 256,\n \"target_action\": 5,\n }\n return hparams\n\n\ndef full_modalities(hparams):\n \"\"\"Full modalities with actions and rewards.\"\"\"\n 
hparams.problem_hparams.modality = {\n \"inputs\": modalities.ModalityType.VIDEO_L2_RAW,\n \"input_action\": modalities.ModalityType.SYMBOL,\n \"input_reward\": modalities.ModalityType.SYMBOL,\n \"targets\": modalities.ModalityType.VIDEO_L2_RAW,\n \"target_action\": modalities.ModalityType.SYMBOL,\n \"target_reward\": modalities.ModalityType.SYMBOL,\n }\n hparams.problem_hparams.vocab_size = {\n \"inputs\": 256,\n \"input_action\": 5,\n \"input_reward\": 3,\n \"targets\": 256,\n \"target_action\": 5,\n \"target_reward\": 3,\n }\n hparams.force_full_predict = True\n return hparams\n\n\ndef create_basic_features(in_frames, out_frames):\n x = np.random.randint(0, 256, size=(8, in_frames, 64, 64, 3))\n y = np.random.randint(0, 256, size=(8, out_frames, 64, 64, 3))\n features = {\n \"inputs\": tf.constant(x, dtype=tf.int32),\n \"targets\": tf.constant(y, dtype=tf.int32),\n }\n return features\n\n\ndef create_action_features(in_frames, out_frames):\n features = create_basic_features(in_frames, out_frames)\n x = np.random.randint(0, 5, size=(8, in_frames, 1))\n y = np.random.randint(0, 5, size=(8, out_frames, 1))\n features[\"input_action\"] = tf.constant(x, dtype=tf.int32)\n features[\"target_action\"] = tf.constant(y, dtype=tf.int32)\n return features\n\n\ndef create_full_features(in_frames, out_frames):\n features = create_basic_features(in_frames, out_frames)\n x = np.random.randint(0, 5, size=(8, in_frames, 1))\n y = np.random.randint(0, 5, size=(8, out_frames, 1))\n features[\"input_reward\"] = tf.constant(x, dtype=tf.int32)\n features[\"target_reward\"] = tf.constant(y, dtype=tf.int32)\n return features\n\n\ndef get_tensor_shape(tensor):\n return tuple([d.value for d in tensor.shape])\n\n\nclass BaseNextFrameTest(tf.test.TestCase):\n \"\"\"Base helper class for next frame tests.\"\"\"\n\n def RunModel(self, model, hparams, features):\n with tf.Session() as session:\n model = model(hparams, tf.estimator.ModeKeys.TRAIN)\n logits, _ = model(features)\n 
session.run(tf.global_variables_initializer())\n res = session.run(logits)\n return res\n\n def InferModel(self, model, hparams, features):\n with tf.Session() as session:\n model = model(hparams, tf.estimator.ModeKeys.PREDICT)\n output = model.infer(features)\n session.run(tf.global_variables_initializer())\n res = session.run(output)\n return res\n\n def TestVideoModel(self,\n in_frames,\n out_frames,\n hparams,\n model,\n expected_last_dim,\n upsample_method=\"conv2d_transpose\"):\n hparams = fill_hparams(hparams, in_frames, out_frames)\n hparams.upsample_method = upsample_method\n\n features = create_basic_features(in_frames, out_frames)\n output = self.RunModel(model, hparams, features)\n\n targets = features[\"targets\"]\n expected_shape = get_tensor_shape(targets) + (expected_last_dim,)\n self.assertEqual(output.shape, expected_shape)\n\n def TestVideoModelInfer(self,\n in_frames,\n out_frames,\n hparams,\n model,\n expected_last_dim,\n upsample_method=\"conv2d_transpose\"):\n del expected_last_dim\n hparams = fill_hparams(hparams, in_frames, out_frames)\n hparams.upsample_method = upsample_method\n\n features = create_basic_features(in_frames, out_frames)\n output = self.InferModel(model, hparams, features)\n\n self.assertTrue(isinstance(output, dict))\n self.assertTrue(\"outputs\" in output.keys())\n self.assertTrue(\"scores\" in output.keys())\n self.assertTrue(\"targets\" in output.keys())\n expected_shape = get_tensor_shape(features[\"targets\"])\n self.assertEqual(output[\"targets\"].shape, expected_shape)\n\n def TestVideoModelWithActions(self,\n in_frames,\n out_frames,\n hparams,\n model,\n expected_last_dim):\n hparams = fill_hparams(hparams, in_frames, out_frames)\n hparams = action_modalities(hparams)\n hparams.reward_prediction = False\n\n features = create_action_features(in_frames, out_frames)\n output = self.RunModel(model, hparams, features)\n\n targets = features[\"targets\"]\n expected_shape = get_tensor_shape(targets) + 
(expected_last_dim,)\n self.assertEqual(output.shape, expected_shape)\n\n def TestVideoModelWithActionsInfer(self,\n in_frames,\n out_frames,\n hparams,\n model,\n expected_last_dim):\n del expected_last_dim\n hparams = fill_hparams(hparams, in_frames, out_frames)\n hparams = action_modalities(hparams)\n hparams.reward_prediction = False\n\n features = create_action_features(in_frames, out_frames)\n output = self.InferModel(model, hparams, features)\n\n self.assertTrue(isinstance(output, dict))\n self.assertTrue(\"outputs\" in output.keys())\n self.assertTrue(\"scores\" in output.keys())\n self.assertTrue(\"targets\" in output.keys())\n expected_shape = get_tensor_shape(features[\"targets\"])\n self.assertEqual(output[\"targets\"].shape, expected_shape)\n\n def TestVideoModelWithActionAndRewards(self,\n in_frames,\n out_frames,\n hparams,\n model,\n expected_last_dim):\n hparams = fill_hparams(hparams, in_frames, out_frames)\n hparams = full_modalities(hparams)\n hparams.reward_prediction = True\n\n features = create_full_features(in_frames, out_frames)\n\n res = self.RunModel(model, hparams, features)\n\n output, targets = res[\"targets\"], features[\"targets\"]\n expected_shape = get_tensor_shape(targets) + (expected_last_dim,)\n self.assertEqual(output.shape, expected_shape)\n\n output, targets = res[\"target_reward\"], features[\"target_reward\"]\n # Assuming Symbol Modality\n expected_shape = get_tensor_shape(targets)[:2] + (1, 1, 1, 1, 3,)\n self.assertEqual(output.shape, expected_shape)\n\n def TestVideoModelWithActionAndRewardsInfer(self,\n in_frames,\n out_frames,\n hparams,\n model,\n expected_last_dim):\n del expected_last_dim\n hparams = fill_hparams(hparams, in_frames, out_frames)\n hparams = full_modalities(hparams)\n hparams.reward_prediction = True\n\n features = create_full_features(in_frames, out_frames)\n\n output = self.InferModel(model, hparams, features)\n\n self.assertTrue(isinstance(output, dict))\n self.assertTrue(\"outputs\" in 
output.keys())\n self.assertTrue(\"scores\" in output.keys())\n self.assertTrue(\"targets\" in output.keys())\n self.assertTrue(\"target_reward\" in output.keys())\n expected_shape = get_tensor_shape(features[\"targets\"])\n self.assertEqual(output[\"targets\"].shape, expected_shape)\n expected_shape = get_tensor_shape(features[\"target_reward\"])[:2]\n self.assertEqual(output[\"target_reward\"].shape, expected_shape)\n\n def TestOnVariousInputOutputSizes(\n self, hparams, model, expected_last_dim, test_infer=True):\n test_funcs = [self.TestVideoModel]\n if test_infer:\n test_funcs += [self.TestVideoModelInfer]\n for test_func in test_funcs:\n test_func(1, 1, hparams, model, expected_last_dim)\n test_func(1, 6, hparams, model, expected_last_dim)\n test_func(4, 1, hparams, model, expected_last_dim)\n test_func(7, 5, hparams, model, expected_last_dim)\n\n def TestWithActions(self, hparams, model, expected_last_dim, test_infer=True):\n test_funcs = [self.TestVideoModelWithActions]\n if test_infer:\n test_funcs += [self.TestVideoModelWithActionsInfer]\n for test_func in test_funcs:\n test_func(1, 1, hparams, model, expected_last_dim)\n test_func(1, 6, hparams, model, expected_last_dim)\n test_func(4, 1, hparams, model, expected_last_dim)\n test_func(7, 5, hparams, model, expected_last_dim)\n\n def TestWithActionAndRewards(\n self, hparams, model, expected_last_dim, test_infer=True):\n test_funcs = [self.TestVideoModelWithActionAndRewards]\n if test_infer:\n test_funcs += [self.TestVideoModelWithActionAndRewardsInfer]\n for test_func in test_funcs:\n test_func(1, 1, hparams, model, expected_last_dim)\n test_func(1, 6, hparams, model, expected_last_dim)\n test_func(4, 1, hparams, model, expected_last_dim)\n test_func(7, 5, hparams, model, expected_last_dim)\n\n def TestOnVariousUpSampleLayers(self, hparams, model, expected_last_dim):\n self.TestVideoModel(4, 1, hparams, model, expected_last_dim,\n upsample_method=\"bilinear_upsample_conv\")\n self.TestVideoModel(4, 1, 
hparams, model, expected_last_dim,\n upsample_method=\"nn_upsample_conv\")\n",
"# coding=utf-8\n# Copyright 2020 The Tensor2Tensor Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nr\"\"\"Data generators for the DeepMind Mathematics Dataset.\n\nSee https://github.com/deepmind/mathematics_dataset for the original repository.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport os\nimport tarfile\n\nfrom tensor2tensor.data_generators import generator_utils\nfrom tensor2tensor.data_generators import problem\nfrom tensor2tensor.data_generators import text_problems\nfrom tensor2tensor.utils import registry\nfrom tensor2tensor.utils import metrics\n\n\nimport tensorflow.compat.v1 as tf\n\n\n_URL = \"https://storage.cloud.google.com/mathematics-dataset/mathematics_dataset-v1.0.tar.gz\"\n\n\[email protected]_problem\nclass AlgorithmicMathDeepmindAll(text_problems.Text2TextProblem):\n \"\"\"DeepMind Mathematics Problem, v1.0, all data.\"\"\"\n\n @property\n def vocab_type(self):\n return text_problems.VocabType.CHARACTER\n\n @property\n def dataset_splits(self):\n return [{\n \"split\": problem.DatasetSplit.TRAIN,\n \"shards\": 128,\n }, {\n \"split\": problem.DatasetSplit.EVAL,\n \"shards\": 1,\n }, {\n \"split\": \"extra_add_or_sub_big\",\n \"shards\": 1,\n }, {\n \"split\": \"extra_add_sub_multiple_longer\",\n \"shards\": 1,\n }, {\n \"split\": \"extra_div_big\",\n \"shards\": 1,\n }, {\n \"split\": \"extra_mixed_longer\",\n \"shards\": 1,\n }, {\n \"split\": 
\"extra_mul_big\",\n \"shards\": 1,\n }, {\n \"split\": \"extra_mul_div_multiple_longer\",\n \"shards\": 1,\n }, {\n \"split\": \"inter_add_or_sub\",\n \"shards\": 1,\n }, {\n \"split\": \"inter_add_sub_multiple\",\n \"shards\": 1,\n }, {\n \"split\": \"inter_div\",\n \"shards\": 1,\n }, {\n \"split\": \"inter_mixed\",\n \"shards\": 1,\n }, {\n \"split\": \"inter_mul\",\n \"shards\": 1,\n }, {\n \"split\": \"inter_mul_div_multiple\",\n \"shards\": 1,\n }]\n\n # What evaluation metrics to use with this problem.\n def eval_metrics(self):\n return [metrics.Metrics.ACC, metrics.Metrics.ACC_TOP5,\n metrics.Metrics.ACC_PER_SEQ]\n\n @property\n def is_generate_per_split(self):\n return True\n\n def generate_samples(self, data_dir, tmp_dir, dataset_split, specific_files=False):\n \"\"\"Downloads and extracts the dataset and generates examples.\n\n Args:\n data_dir: The base directory where data and vocab files are stored.\n tmp_dir: temp directory to download and extract the dataset.\n dataset_split: split of the data-set.\n\n Yields:\n The data examples.\n \"\"\"\n # # Create directories if needed.\n # if not tf.gfile.Exists(tmp_dir):\n # tf.gfile.MakeDirs(tmp_dir)\n # if not tf.gfile.Exists(data_dir):\n # tf.gfile.MakeDirs(data_dir)\n\n # # Download and extract the data.\n # filename = os.path.basename(_URL)\n # path = generator_utils.maybe_download(tmp_dir, filename, _URL)\n # print(\"PATH: \", path)\n # tarfile.open(path, \"r:gz\").extractall(tmp_dir)\n\n def expand_split(dataset_split):\n return dataset_split[:5] + \"polate/arithmetic__\" + dataset_split[6:]\n\n # Create the list of directories with data files.\n train_dirs = [\"mathematics_dataset-v1.0/train-easy\", \"mathematics_dataset-v1.0/train-medium\", \"mathematics_dataset-v1.0/train-hard\"]\n eval_dirs = [\"mathematics_dataset-v1.0/interpolate\", \"mathematics_dataset-v1.0/extrapolate\"]\n dirs = eval_dirs\n # this only happens if not training and specific_files\n if specific_files:\n dirs = [ # load files 
specified by self.dataset_splits\n \"mathematics_dataset-v1.0/\" + expand_split(pair[\"split\"])\n for pair in self.dataset_splits\n if not(\n pair[\"split\"] == problem.DatasetSplit.TRAIN or\n pair[\"split\"] == problem.DatasetSplit.EVAL or\n pair[\"split\"] == problem.DatasetSplit.TEST\n )\n ]\n if dataset_split == problem.DatasetSplit.TRAIN:\n dirs = train_dirs\n dirs = [os.path.join(tmp_dir, d) for d in dirs]\n\n # Iterate over directories and files generating examples.\n for d in dirs:\n if specific_files:\n files = tf.gfile.Glob(d + \".txt\")\n else:\n files = tf.gfile.Glob(d + \"/*.txt\")\n for fname in files:\n # In each text file, the first line is the input, the next the answer,\n # and so on until the end of the file.\n cur_input = None\n with tf.gfile.Open(fname, \"rb\") as f:\n for line in f:\n if cur_input is None:\n # cur_input = line.strip()\n cur_input = str(line)\n else:\n yield {\"inputs\": cur_input, \"targets\": str(line)}\n # yield {\"inputs\": cur_input, \"targets\": line.strip()}\n cur_input = None\n"
] |
[
[
"tensorflow.compat.v1.gfile.Exists"
],
[
"tensorflow.compat.v1.python_io.tf_record_iterator",
"tensorflow.compat.v1.train.Example"
],
[
"tensorflow.compat.v1.Session",
"tensorflow.compat.v1.global_variables_initializer",
"tensorflow.compat.v1.constant",
"numpy.random.randint"
],
[
"tensorflow.compat.v1.gfile.Glob",
"tensorflow.compat.v1.gfile.Open"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rcmdnk/phys_learning
|
[
"2ea0b3e133aed1f57ede03c8ab0c43487a2e0266"
] |
[
"src/phys_learning/core.py"
] |
[
"import numpy as np\nfrom var_learning.core import VarLearning\nfrom var_learning.formula import Formula\nfrom var_learning.plot import hist_two\nfrom .two_particles import TwoParticles\nfrom .phys import hist_two_phys, mass, pt\n\n\nclass PhysLearning(VarLearning):\n def __init__(self, signal=None, bg=None,\n var_labels=[\"px1\", \"py1\", \"pz1\", \"e1\",\n \"px2\", \"py2\", \"pz2\", \"e2\"],\n **kw):\n data = kw['data'] if 'data' in kw else None\n if data is None:\n if signal is not None:\n data = TwoParticles(signal, 1).data\n if bg is not None:\n bg = TwoParticles(bg, 0).data\n if data is None:\n data = bg\n else:\n data = np.concatenate([data, bg])\n\n super().__init__(data=data, var_labels=var_labels, **kw)\n\n self.cmd.update({'original_hist': self.original_hist,\n 'my_hist': self.my_hist,\n 'x1': self.x1,\n 'x2': self.x2,\n 'y1': self.y2,\n 'y2': self.y2,\n 'z1': self.z1,\n 'z2': self.z2,\n 'e1': self.e1,\n 'e2': self.e2,\n 'mass': self.mass,\n 'mass_pt': self.mass_pt})\n\n def get_signal(self):\n is_signal = np.array(self.data[:, -1], bool)\n return np.array(self.data[:, 0:-1])[is_signal]\n\n def get_bg(self):\n is_signal = np.array(self.data[:, -1], bool)\n return np.array(self.data[:, 0:-1])[~is_signal]\n\n def original_hist(self):\n hist_two_phys(self.get_signal(), self.get_bg(),\n self.name + \"_original\")\n\n def my_hist(self):\n import json\n\n with open(self.json) as f:\n rpn = json.load(f)\n for i, r in enumerate(rpn):\n formula = Formula(8, var_labels=self.var_labels)\n formula.rpn = r\n var_signal = formula.calc(self.get_signal())\n var_bg = formula.calc(self.get_bg())\n sb = np.concatenate([var_signal, var_bg], 0)\n\n per10 = np.percentile(sb, 10)\n per90 = np.percentile(sb, 90)\n center = (per90 + per10) / 2\n xmin = 2 * per10 - center\n xmax = 2 * per90 - center\n\n xlabel = formula.get_formula()\n if len(xlabel) > 50:\n xlabel = xlabel[:47] + '...'\n\n hist_two(var_signal, var_bg, 100, [xmin, xmax],\n '{}_{}'.format(self.name, i), 
xlabel=xlabel,\n label1='signal', label2='bg')\n print('{}_{}'.format(self.name, i))\n # return pseudo values\n return None, None\n\n def x1(self):\n return self.one_var(0)\n\n def y1(self):\n return self.one_var(1)\n\n def z1(self):\n return self.one_var(2)\n\n def e1(self):\n return self.one_var(3)\n\n def x2(self):\n return self.one_var(4)\n\n def y2(self):\n return self.one_var(5)\n\n def z2(self):\n return self.one_var(6)\n\n def e2(self):\n return self.one_var(7)\n\n def one_var(self, i):\n x_train = self.x_train[:, i:i + 1]\n x_test = self.x_test[:, i:i + 1]\n self.make_classifier(self.name + \"_\" + self.formula.var_labels[i],\n x_train, x_test)\n acc = self.classifier.run_all()\n value = self.formula.var_labels[i]\n print('{:.3f} {}'.format(acc, value))\n return acc, value\n\n def mass(self):\n x_train = [[x] for x\n in mass(self.x_train[:, 0:4], self.x_train[:, 4:8])]\n x_test = [[x] for x in mass(self.x_test[:, 0:4], self.x_test[:, 4:8])]\n self.make_classifier(self.name + \"_mass\", x_train, x_test)\n acc = self.classifier.run_all()\n values = 'm12'\n print('{:.3f} {}'.format(acc, values))\n return acc, values\n\n def mass_pt(self):\n x_train = np.array([mass(self.x_train[:, 0:4], self.x_train[:, 4:8]),\n pt(self.x_train[:, 0], self.x_train[:, 1]),\n pt(self.x_train[:, 4], self.x_train[:, 5])]).T\n x_test = np.array([mass(self.x_test[:, 0:4], self.x_test[:, 4:8]),\n pt(self.x_test[:, 0], self.x_test[:, 1]),\n pt(self.x_test[:, 4], self.x_test[:, 5])]).T\n self.make_classifier(self.name + \"_mass_pt\", x_train, x_test)\n acc = self.classifier.run_all()\n values = 'm12, pt1, pt2'\n print('{:.3f} {}'.format(acc, values))\n return acc, values\n\n def prediction_check(self, model, x_test):\n predictions = model.predict(x_test)\n sig_sig = []\n sig_bg = []\n bg_sig = []\n bg_bg = []\n for i in range(predictions.size):\n if self.y_test[i] == 1:\n if predictions[i] > 0.5:\n sig_sig.append(self.x_test[i])\n else:\n sig_bg.append(self.x_test[i])\n elif 
predictions[i] > 0.5:\n bg_sig.append(self.x_test[i])\n else:\n bg_bg.append(self.x_test[i])\n sig_sig = np.array(sig_sig)\n sig_bg = np.array(sig_bg)\n bg_sig = np.array(bg_sig)\n bg_bg = np.array(bg_bg)\n hist_two_phys(sig_sig, sig_bg, self.name + \"_sig_check\")\n hist_two_phys(bg_sig, bg_bg, self.name + \"_bg_check\")\n"
] |
[
[
"numpy.concatenate",
"numpy.array",
"numpy.percentile"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mustaric/TESS_JWST_calibration_stars
|
[
"045507685309d7aab609be6c98721e4ee3a5701b"
] |
[
"paper_plots_tables.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Sep 1 11:57:52 2021\n\n@author: smullally\n\"\"\"\n\nimport Susan \nimport matplotlib.pyplot as plt\nimport numpy as np\n\noutdir = \"/Users/smullally/Science/tess_monitor_standards/paper/plots/\"\n#Directory of the data files\nddir = \"/Users/smullally/Science/tess_monitor_standards/detrended_standards/good/\"\n#The list of names and filenames\ninfile = \"/Users/smullally/Science/tess_monitor_standards/paper/plots/inputfilenames.csv\"\n\nfilenames = np.loadtxt(infile, dtype=str, delimiter=',')\n\n#This needs to be run first and then gets filled in below, one at a time.\nstats = np.zeros((len(filenames[:,0]),5))\n\n#%%\n\n#for f in filenames[:,0]:\n# Susan.create_lcft_plot(ddir+f, periods = [.004,12], times=None)\n#%%\n\ni = 0\npers = [0.5,12]\ntimes = [2174, 2230]\nlabel = \"%s\\nTIC %u\\nSector 32-33\" % (filenames[i,1], int(filenames[i,0][7:17]))\nSusan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)\nplt.savefig(outdir + filenames[i,0] + \".png\")\nret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)\nstats[i,:] = ret\n\n#%%\ni = 1\npers = [0.1,12]\ntimes = [2204, 2214.5]\nlabel = \"%s\\nTIC %u\\nSector 33\" % (filenames[i,1], int(filenames[i,0][7:17]))\nSusan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)\nplt.savefig(outdir + filenames[i,0] + \".png\")\nret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)\nstats[i,:] = ret\n\n#%\ni = 2\npers = [0.08,12]\ntimes = None\nlabel = \"%s\\nTIC %u\\nSector 5\" % (filenames[i,1], int(filenames[i,0][7:17]))\nSusan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)\nplt.savefig(outdir + filenames[i,0] + \".png\")\nret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)\nstats[i,:] = ret\n\n#%%\ni = 3\npers = 
[0.2,12]\ntimes =[2102, 2113.5]\nlabel = \"%s\\nTIC %u\\nSector 29\" % (filenames[i,1], int(filenames[i,0][7:17]))\nSusan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)\nplt.savefig(outdir + filenames[i,0] + \".png\")\nret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)\nstats[i,:] = ret\n\n#%\ni = 4\npers = [0.015,12]\ntimes = [1815.8, 1828]\nlabel = \"%s\\nTIC %u\\nSector 19\" % (filenames[i,1], int(filenames[i,0][7:17]))\nSusan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)\nplt.savefig(outdir + filenames[i,0] + \".png\")\nret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)\nstats[i,:] = ret\n\n#%\ni = 5\npers = [0.014,5]\ntimes = [2406,2409]\nlabel = \"%s\\nTIC %u\\nSector 40\" % (filenames[i,1], int(filenames[i,0][7:17]))\nSusan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)\nplt.savefig(outdir + filenames[i,0] + \".png\")\nret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)\nstats[i,:] = ret\n\n#%\ni = 6\npers = [0.05,10]\ntimes = None\nlabel = \"%s\\nTIC %u\\nSector 40\" % (filenames[i,1], int(filenames[i,0][7:17]))\nSusan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)\nplt.savefig(outdir + filenames[i,0] + \".png\")\nret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)\nstats[i,:] = ret\n\n#%\ni = 7\npers = [0.01,12]\ntimes = [2389.5,2404.9]\nlabel = \"%s\\nTIC %u\\nSector 40\" % (filenames[i,1], int(filenames[i,0][7:17]))\nSusan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)\nplt.savefig(outdir + filenames[i,0] + \".png\")\nret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)\nstats[i,:] = ret\n#%\ni = 8\npers = [0.4,12]\ntimes = [2390, 
2405]\nlabel = \"%s\\nTIC %u\\nSector 40\" % (filenames[i,1], int(filenames[i,0][7:17]))\nSusan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)\nplt.savefig(outdir + filenames[i,0] + \".png\")\nret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)\nstats[i,:] = ret\n#%\ni = 9\npers = [0.4,12]\ntimes = [1751,1763.5]\nlabel = \"%s\\nTIC %u\\nSector 16\" % (filenames[i,1], int(filenames[i,0][7:17]))\nSusan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)\nplt.savefig(outdir + filenames[i,0] + \".png\")\nret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)\nstats[i,:] = ret\n#%\ni = 10\npers = [0.2,12]\ntimes = [1855.8,1869]\nlabel = \"%s\\nTIC %u\\nSector 20\" % (filenames[i,1], int(filenames[i,0][7:17]))\nSusan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)\nplt.savefig(outdir + filenames[i,0] + \".png\")\nret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)\nstats[i,:] = ret\n#%\ni = 11\npers = [0.4,12]\ntimes = None\nlabel = \"%s\\nTIC %u\\nSector 21\" % (filenames[i,1], int(filenames[i,0][7:17]))\nSusan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)\nplt.savefig(outdir + filenames[i,0] + \".png\")\nret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)\nstats[i,:] = ret\n#\ni = 12\npers = [0.04,8]\ntimes = None\nlabel = \"%s\\nTIC %u\\nSector 33\" % (filenames[i,1], int(filenames[i,0][7:17]))\nSusan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)\nplt.savefig(outdir + filenames[i,0] + \".png\")\nret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)\nstats[i,:] = ret\n#%\ni = 13\npers = [0.6,14]\ntimes = None\nlabel = \"%s\\nTIC %u\\nSector 1\" % 
(filenames[i,1], int(filenames[i,0][7:17]))\nSusan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)\nplt.savefig(outdir + filenames[i,0] + \".png\")\nret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)\nstats[i,:] = ret\n#%\ni = 14\npers = [0.2,12]\ntimes = None\nlabel = \"%s\\nTIC %u\\nSector 32\" % (filenames[i,1], int(filenames[i,0][7:17]))\nSusan.create_lcft_plot(ddir+filenames[i,0], periods = pers, times=times, label = label)\nplt.savefig(outdir + filenames[i,0] + \".png\")\nret = Susan.calc_variable_stats(ddir+filenames[i,0], ticid = int(filenames[i,0][7:17]), periods=pers)\nstats[i,:] = ret\n\n#%%\n\n#Need run above cells first\nofn = \"/Users/smullally/Science/tess_monitor_standards/paper/variable_stats.csv\"\nform = (\"%u\", \"%5.5f\", \"%.4f\", \"%.4f\", \"%.4f\")\nnp.savetxt(fname= ofn, X = stats, delimiter=\",\", \n header = \"TIC,period_at_max,max_amplitude,2sigma_pkpk,3pk2pk\",\n fmt = form)\n\n#%%\n#Get Crowdsap for all stars.\nimport lightkurve as lk\nimport pandas as p\ntarget_name_file = \"/Users/smullally/Science/tess_monitor_standards/paper/target_names.csv\"\ntargets = p.read_csv(target_name_file,delimiter=',', header=\"infer\")\n\nfor i,t in targets.iterrows():\n search = lk.search_lightcurve(\"TIC %u\" % t['ticid'], mission='TESS', \n author = ['SPOC','TESS-SPOC'])\n try:\n lc = search[search.author=='SPOC'][0].download()\n except:\n lc = search[0].download() \n print(t['ticid'],lc.CROWDSAP,lc.TIMEDEL)\n#%%\n#Create the Variable table with Pandas\nimport pandas as p\ntarget_name_file = \"/Users/smullally/Science/tess_monitor_standards/paper/target_names.csv\"\ntargets = p.read_csv(target_name_file,delimiter=',', header=\"infer\")\n\nvar_stats = p.read_csv(ofn)\nvarstars = targets['var'] == 'y'\nvstats = p.DataFrame({'ticid':[],'name':[],'crowdsap':[],'period_max':[],'amp_max':[],'pkpk':[]})\n\nfor i,t in targets[varstars].iterrows():\n want = var_stats['# TIC'] 
== t['ticid']\n pmax = float(var_stats[want]['period_at_max'])\n amax = float(var_stats[want]['max_amplitude'])\n pkpk = float(var_stats[want]['2sigma_pkpk'])\n newstat = p.DataFrame({'ticid':str(int(t['ticid'])),'name':t['name'],\n 'crowdsap':t['crowdsap'], 'pmax':[pmax],\n 'amax':[amax], 'pkpk':[pkpk]})\n vstats = vstats.append(newstat)\n\ntabfile = \"/Users/smullally/Science/tess_monitor_standards/paper/var_stats.tab\"\nvstats.to_latex(buf=tabfile, column_format='llrrrr', index = False,\n columns = ['name','ticid','crowdsap','pmax','amax','pkpk'],\n float_format = \"%.4f\") \n\n\n#%%\n\ninfile = \"/Users/smullally/Science/tess_monitor_standards/detrended_standards/good/all_data.txt\"\noutfile = \"/Users/smullally/Science/tess_monitor_standards/detrended_standards/good/all_data_stats.txt\"\nddir = \"/Users/smullally/Science/tess_monitor_standards/detrended_standards/good/\"\nfilenames = np.loadtxt(infile,dtype=str)\n\nstats = np.zeros((len(filenames), 5))\n\nfor i,f in enumerate(filenames):\n ticid = int(f[5:17])\n sector = int(f[19:22])\n \n #4 sigma long and short period limits from periodogram.\n #Bin size is used to bin for the largest variation stat.\n short, long, largest = Susan.nonvar_stats(ddir + f, period = 1, \n long_bin_size = .02083)\n \n stats[i,:] = np.array([ticid, sector, 100*short, 100*long, 100*largest])\n\nform = (\"%u\", \"%.4f\", \"%.4f\", \"%.4f\", \"%.4f\")\nnp.savetxt(outfile, stats, delimiter = \",\", fmt = form, header = \"TIC,sector,short,long,3siglarge\")\n\n \n#%%\nimport pandas as p\n#The outfile here is the above csv file calculating stats for all data files.\noutfile = \"/Users/smullally/Science/tess_monitor_standards/detrended_standards/good/all_data_stats.txt\"\nnovar_stats = p.read_csv(outfile)\n#The following files comes from the Target_Stars_names excel sheet 1\ntarget_name_file = \"/Users/smullally/Science/tess_monitor_standards/paper/target_names.csv\"\ntargets = p.read_csv(target_name_file,delimiter=',', 
header=\"infer\")\n\nnovar_list = \"/Users/smullally/Science/tess_monitor_standards/paper/\" #contain TIC and actual name\n\nstats = p.DataFrame({'ticid':[],'name':[],'crowdsap':[],'short':[],'long':[],'large':[]})\n\nnotvariable_ones = targets['var'] == 'n'\n\nfor i,t in targets[notvariable_ones].iterrows():\n want = novar_stats['# TIC'] == t['ticid']\n min_short = np.min(novar_stats[want]['short'])\n min_long = np.min(novar_stats[want]['long'])\n min_large = np.min(novar_stats[want]['3siglarge'])\n newstat = p.DataFrame({'ticid':str(int(t['ticid'])),'name':t['name'], \n 'crowdsap':t['crowdsap'], \n 'short':[min_short],'long':[min_long], \n 'large':[min_large]})\n #print(newstat)\n stats = stats.append(newstat) \n\ntabfile = \"/Users/smullally/Science/tess_monitor_standards/paper/novar_stats.tab\"\nstats.to_latex(buf=tabfile, column_format='lllrrr', index = False,\n columns = ['name','ticid','crowdsap','short','long','large'],\n float_format = \"%.4f\")\n\n\n#%%\n#Target Table\nfrom astroquery.mast import Catalogs\nfrom astropy.coordinates import SkyCoord\ntarget_name_file = \"/Users/smullally/Science/tess_monitor_standards/paper/target_names.csv\"\ntargets = p.read_csv(target_name_file,delimiter=',', header=\"infer\")\n\nticids = np.array(targets['ticid'])\ntic_data = Catalogs.query_criteria(catalog=\"Tic\",ID = ticids)\ntic_data.add_column('skycoord')\n\n#for i,tic in enumerate(tic_data['ID']):\nsc = SkyCoord(tic_data['ra'], tic_data['dec'],unit='deg',frame='icrs')\nskycoordstr = sc.to_string('hmsdms', precision=0)\ntic_data['skycoord'] = skycoordstr\ntic_p = tic_data.to_pandas()\n\n#tic_data[['ID','Tmag','Teff','logg','skycoord']]\n\ntarg_tab = p.DataFrame({'ticid':[],'name':[],'coord':[],'Tmag':[],'sptype':[],'cadence':[]})\nfor i,t in targets.iterrows():\n want = tic_p['ID'].astype(int) == int(t['ticid'])\n tmag = \"%.1f\" % float(tic_p[want]['Tmag'])\n scstr = tic_p[want]['skycoord'].item()\n row = 
p.DataFrame({'ticid':[str(int(t['ticid']))],'name':[t['name']],\n 'coord':[scstr],'Tmag':[tmag],\n 'sptype':[t['sptype']],'cadence':[t['dtype']]})\n targ_tab = targ_tab.append(row)\n\ntarg_file = \"/Users/smullally/Science/tess_monitor_standards/paper/target_table.tab\" \ntarg_tab.to_latex(buf = targ_file, column_format = \"llllll\", index=False, \n columns = ['name','ticid','sptype','Tmag','cadence','coord']) "
] |
[
[
"pandas.read_csv",
"numpy.min",
"matplotlib.pyplot.savefig",
"pandas.DataFrame",
"numpy.savetxt",
"numpy.array",
"numpy.loadtxt"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.3",
"1.1",
"1.5",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
cgajagon/Trading-App
|
[
"10ffbea9dec3deca94489bf62d9f79e22f5ebebc"
] |
[
"trading/services.py"
] |
[
"import requests\nimport json\nimport pandas as pd\nfrom django.conf import settings\n\nfrom rest_framework.response import Response\nfrom rest_framework import status\n\nfrom trading import serializers, models\n\n# 3rd Part API: iextrading.com\ndef get_symbols():\n url = 'https://api.iextrading.com/1.0/ref-data/symbols'\n response = requests.get(url)\n if response.status_code==200:\n symbols = response.json()\n return symbols\n else:\n print(\"There is no connexion\")\n\ndef get_quote(symbol):\n endpoint = 'https://api.iextrading.com/1.0/tops/last?symbols={symbol}'\n url = endpoint.format(symbol=symbol)\n response =requests.get(url)\n if response.status_code==200:\n quote = response.json()\n return quote\n else:\n print(\"There is no connexion\")\n\ndef get_chart_data(symbol, time_interval):\n endpoint = 'https://sandbox.iexapis.com/stable/stock/{symbol}/chart/{time_interval}?token=Tpk_e023b4e95edb425c9dc89ee4c6972086'\n url = endpoint.format(symbol=symbol, time_interval=time_interval)\n response =requests.get(url)\n if response.status_code==200:\n history = response.json()\n # Trasnform output\n df = pd.DataFrame(history)\n df_resample = pd.DataFrame(columns=['min_close','min_open','max_high','min_low', 'mean_volume'])\n interval = 'M'\n df['date'] = pd.to_datetime(df['date'])\n df.index = df['date'] \n df_resample['min_close'] = df['close'].resample(interval).min()\n df_resample['min_open'] = df['open'].resample(interval).min()\n df_resample['max_high'] = df['high'].resample(interval).max()\n df_resample['min_low'] = df['low'].resample(interval).min()\n df_resample['mean_volume'] = df['volume'].resample(interval).mean()\n df_resample['date'] = df_resample.index.date\n data = df_resample.to_dict('records')\n #data = {\n # 'date':list(df_resample['date']),\n # 'high':list(df_resample['max_high']),\n # 'low':list(df_resample['min_low']),\n # 'open':list(df_resample['min_open']),\n # 'close':list(df_resample['min_close']),\n # 
'volume':list(df_resample['mean_volume']),\n #}\n return data\n else:\n print(\"There is no connexion\")\n\ndef get_history_data(symbol, time_interval):\n endpoint = 'https://sandbox.iexapis.com/stable/stock/{symbol}/chart/{time_interval}?token=Tpk_e023b4e95edb425c9dc89ee4c6972086'\n url = endpoint.format(symbol=symbol, time_interval=time_interval)\n response =requests.get(url)\n if response.status_code==200:\n history = response.json()\n return history\n else:\n print(\"There is no connexion\")\n\n\n# 3rd Part API: yahoo-finance.com\ndef get_chart_data2(symbol, time_interval, time_range):\n endpoint = \"https://apidojo-yahoo-finance-v1.p.rapidapi.com/stock/v2/get-chart?interval={interval}®ion=US&symbol={symbol}&lang=en&range={range}\"\n url = endpoint.format(source_lang='en', region='US', symbol=symbol, interval=time_interval, range=time_range )\n headers = {'X-RapidAPI-Host': settings.X_RapidAPI_Host, 'X-RapidAPI-Key': settings.X_RapidAPI_Key}\n response = requests.get(url, headers=headers)\n data = response.json()\n return data"
] |
[
[
"pandas.to_datetime",
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
ibancg/meshio
|
[
"ef56ba90b3a072b75aa504d4dde40c8cb1a0a35e",
"ef56ba90b3a072b75aa504d4dde40c8cb1a0a35e"
] |
[
"meshio/abaqus/_abaqus.py",
"meshio/gmsh/_gmsh22.py"
] |
[
"\"\"\"\nI/O for Abaqus inp files.\n\"\"\"\nimport numpy\n\nfrom ..__about__ import __version__\nfrom .._exceptions import ReadError\nfrom .._files import open_file\nfrom .._helpers import register\nfrom .._mesh import CellBlock, Mesh\n\nabaqus_to_meshio_type = {\n # trusses\n \"T2D2\": \"line\",\n \"T2D2H\": \"line\",\n \"T2D3\": \"line3\",\n \"T2D3H\": \"line3\",\n \"T3D2\": \"line\",\n \"T3D2H\": \"line\",\n \"T3D3\": \"line3\",\n \"T3D3H\": \"line3\",\n # beams\n \"B21\": \"line\",\n \"B21H\": \"line\",\n \"B22\": \"line3\",\n \"B22H\": \"line3\",\n \"B31\": \"line\",\n \"B31H\": \"line\",\n \"B32\": \"line3\",\n \"B32H\": \"line3\",\n \"B33\": \"line3\",\n \"B33H\": \"line3\",\n # surfaces\n \"CPS4\": \"quad\",\n \"CPS4R\": \"quad\",\n \"S4\": \"quad\",\n \"S4R\": \"quad\",\n \"S4RS\": \"quad\",\n \"S4RSW\": \"quad\",\n \"S4R5\": \"quad\",\n \"S8R\": \"quad8\",\n \"S8R5\": \"quad8\",\n \"S9R5\": \"quad9\",\n # \"QUAD\": \"quad\",\n # \"QUAD4\": \"quad\",\n # \"QUAD5\": \"quad5\",\n # \"QUAD8\": \"quad8\",\n # \"QUAD9\": \"quad9\",\n #\n \"CPS3\": \"triangle\",\n \"STRI3\": \"triangle\",\n \"S3\": \"triangle\",\n \"S3R\": \"triangle\",\n \"S3RS\": \"triangle\",\n # \"TRI7\": \"triangle7\",\n # 'TRISHELL': 'triangle',\n # 'TRISHELL3': 'triangle',\n # 'TRISHELL7': 'triangle',\n #\n \"STRI65\": \"triangle6\",\n # 'TRISHELL6': 'triangle6',\n # volumes\n \"C3D8\": \"hexahedron\",\n \"C3D8H\": \"hexahedron\",\n \"C3D8I\": \"hexahedron\",\n \"C3D8IH\": \"hexahedron\",\n \"C3D8R\": \"hexahedron\",\n \"C3D8RH\": \"hexahedron\",\n # \"HEX9\": \"hexahedron9\",\n \"C3D20\": \"hexahedron20\",\n \"C3D20H\": \"hexahedron20\",\n \"C3D20R\": \"hexahedron20\",\n \"C3D20RH\": \"hexahedron20\",\n # \"HEX27\": \"hexahedron27\",\n #\n \"C3D4\": \"tetra\",\n \"C3D4H\": \"tetra4\",\n # \"TETRA8\": \"tetra8\",\n \"C3D10\": \"tetra10\",\n \"C3D10H\": \"tetra10\",\n \"C3D10I\": \"tetra10\",\n \"C3D10M\": \"tetra10\",\n \"C3D10MH\": \"tetra10\",\n # \"TETRA14\": \"tetra14\",\n #\n # 
\"PYRAMID\": \"pyramid\",\n \"C3D6\": \"wedge\",\n #\n # 4-node bilinear displacement and pore pressure\n \"CAX4P\": \"quad\",\n}\nmeshio_to_abaqus_type = {v: k for k, v in abaqus_to_meshio_type.items()}\n\n\ndef read(filename):\n \"\"\"Reads a Abaqus inp file.\n \"\"\"\n with open_file(filename, \"r\") as f:\n out = read_buffer(f)\n return out\n\n\ndef read_buffer(f):\n # Initialize the optional data fields\n cells = []\n cell_ids = []\n point_sets = {}\n cell_sets = {}\n field_data = {}\n cell_data = {}\n point_data = {}\n\n line = f.readline()\n while True:\n if not line: # EOF\n break\n\n # Comments\n if line.startswith(\"**\"):\n line = f.readline()\n continue\n\n keyword = line.partition(\",\")[0].strip().replace(\"*\", \"\").upper()\n if keyword == \"NODE\":\n points, point_ids, line = _read_nodes(f)\n elif keyword == \"ELEMENT\":\n cell_type, cells_data, ids, line = _read_cells(f, line, point_ids)\n cells.append(CellBlock(cell_type, cells_data))\n cell_ids.append(ids)\n elif keyword == \"NSET\":\n params_map = get_param_map(line, required_keys=[\"NSET\"])\n set_ids, line = _read_set(f, params_map)\n name = params_map[\"NSET\"]\n point_sets[name] = numpy.array(\n [point_ids[point_id] for point_id in set_ids], dtype=\"int32\"\n )\n elif keyword == \"ELSET\":\n params_map = get_param_map(line, required_keys=[\"ELSET\"])\n set_ids, line = _read_set(f, params_map)\n name = params_map[\"ELSET\"]\n cell_sets[name] = []\n for cell_ids_ in cell_ids:\n cell_sets_ = numpy.array(\n [cell_ids_[set_id] for set_id in set_ids if set_id in cell_ids_],\n dtype=\"int32\",\n )\n cell_sets[name].append(cell_sets_)\n else:\n # There are just too many Abaqus keywords to explicitly skip them.\n line = f.readline()\n\n return Mesh(\n points,\n cells,\n point_data=point_data,\n cell_data=cell_data,\n field_data=field_data,\n point_sets=point_sets,\n cell_sets=cell_sets,\n )\n\n\ndef _read_nodes(f):\n points = []\n point_ids = {}\n counter = 0\n while True:\n line = f.readline()\n if 
not line or line.startswith(\"*\"):\n break\n if line.strip() == \"\":\n continue\n\n line = line.strip().split(\",\")\n point_id, coords = line[0], line[1:]\n point_ids[int(point_id)] = counter\n points.append([float(x) for x in coords])\n counter += 1\n\n return numpy.array(points, dtype=float), point_ids, line\n\n\ndef _read_cells(f, line0, point_ids):\n sline = line0.split(\",\")[1:]\n\n etype_sline = sline[0].upper()\n if \"TYPE\" not in etype_sline:\n raise ReadError(etype_sline)\n\n etype = etype_sline.split(\"=\")[1].strip()\n if etype not in abaqus_to_meshio_type:\n raise ReadError(\"Element type not available: {}\".format(etype))\n\n cell_type = abaqus_to_meshio_type[etype]\n\n cells, idx = [], []\n cell_ids = {}\n counter = 0\n while True:\n line = f.readline()\n if not line or line.startswith(\"*\"):\n break\n if line.strip() == \"\":\n continue\n\n line = line.strip()\n idx += [int(k) for k in filter(None, line.split(\",\"))]\n if not line.endswith(\",\"):\n cell_ids[idx[0]] = counter\n cells.append([point_ids[k] for k in idx[1:]])\n idx = []\n counter += 1\n return cell_type, numpy.array(cells), cell_ids, line\n\n\ndef get_param_map(word, required_keys=None):\n \"\"\"\n get the optional arguments on a line\n\n Example\n -------\n >>> word = 'elset,instance=dummy2,generate'\n >>> params = get_param_map(word, required_keys=['instance'])\n params = {\n 'elset' : None,\n 'instance' : 'dummy2,\n 'generate' : None,\n }\n \"\"\"\n if required_keys is None:\n required_keys = []\n words = word.split(\",\")\n param_map = {}\n for wordi in words:\n if \"=\" not in wordi:\n key = wordi.strip().upper()\n value = None\n else:\n sword = wordi.split(\"=\")\n if len(sword) != 2:\n raise ReadError(sword)\n key = sword[0].strip().upper()\n value = sword[1].strip()\n param_map[key] = value\n\n msg = \"\"\n for key in required_keys:\n if key not in param_map:\n msg += \"{} not found in {}\\n\".format(key, word)\n if msg:\n raise RuntimeError(msg)\n return 
param_map\n\n\ndef _read_set(f, params_map):\n set_ids = []\n while True:\n line = f.readline()\n if not line or line.startswith(\"*\"):\n break\n if line.strip() == \"\":\n continue\n\n set_ids += [int(k) for k in line.strip().strip(\",\").split(\",\")]\n\n set_ids = numpy.array(set_ids, dtype=\"int32\")\n if \"GENERATE\" in params_map:\n if len(set_ids) != 3:\n raise ReadError(set_ids)\n set_ids = numpy.arange(set_ids[0], set_ids[1] + 1, set_ids[2], dtype=\"int32\")\n return set_ids, line\n\n\ndef write(filename, mesh, float_fmt=\".15e\", translate_cell_names=True):\n with open_file(filename, \"wt\") as f:\n f.write(\"*Heading\\n\")\n f.write(\"Abaqus DataFile Version 6.14\\n\")\n f.write(\"written by meshio v{}\\n\".format(__version__))\n f.write(\"*Node\\n\")\n fmt = \", \".join([\"{}\"] + [\"{:\" + float_fmt + \"}\"] * mesh.points.shape[1]) + \"\\n\"\n for k, x in enumerate(mesh.points):\n f.write(fmt.format(k + 1, *x))\n eid = 0\n for cell_type, node_idcs in mesh.cells:\n name = (\n meshio_to_abaqus_type[cell_type] if translate_cell_names else cell_type\n )\n f.write(\"*Element,type=\" + name + \"\\n\")\n for row in node_idcs:\n eid += 1\n nids_strs = (str(nid + 1) for nid in row.tolist())\n f.write(str(eid) + \",\" + \",\".join(nids_strs) + \"\\n\")\n\n nnl = 8\n for ic in range(len(mesh.cells)):\n for k, v in mesh.cell_sets.items():\n els = [str(i + 1) for i in v[ic]]\n f.write(\"*ELSET, ELSET=%s\\n\" % k)\n f.write(\n \",\\n\".join(\n \",\".join(els[i : i + nnl]) for i in range(0, len(els), nnl)\n )\n + \"\\n\"\n )\n\n for k, v in mesh.point_sets.items():\n nds = [str(i + 1) for i in v]\n f.write(\"*NSET, NSET=%s\\n\" % k)\n f.write(\n \",\\n\".join(\",\".join(nds[i : i + nnl]) for i in range(0, len(nds), nnl))\n + \"\\n\"\n )\n\n f.write(\"*end\")\n\n\nregister(\"abaqus\", [\".inp\"], read, {\"abaqus\": write})\n",
"\"\"\"\nI/O for Gmsh's msh format, cf.\n<http://gmsh.info//doc/texinfo/gmsh.html#File-formats>.\n\"\"\"\nimport logging\n\nimport numpy\n\nfrom .._common import cell_data_from_raw, raw_from_cell_data\nfrom .._exceptions import ReadError, WriteError\nfrom .._mesh import CellBlock, Mesh\nfrom .common import (\n _gmsh_to_meshio_order,\n _gmsh_to_meshio_type,\n _meshio_to_gmsh_order,\n _meshio_to_gmsh_type,\n _read_data,\n _read_physical_names,\n _write_data,\n _write_physical_names,\n num_nodes_per_cell,\n)\n\nc_int = numpy.dtype(\"i\")\nc_double = numpy.dtype(\"d\")\n\n\ndef read_buffer(f, is_ascii, data_size):\n # The format is specified at\n # <http://gmsh.info//doc/texinfo/gmsh.html#MSH-ASCII-file-format>.\n\n # Initialize the optional data fields\n points = []\n cells = []\n field_data = {}\n cell_data_raw = {}\n cell_tags = {}\n point_data = {}\n periodic = None\n while True:\n line = f.readline().decode(\"utf-8\")\n if not line:\n # EOF\n break\n if line[0] != \"$\":\n raise ReadError()\n environ = line[1:].strip()\n\n if environ == \"PhysicalNames\":\n _read_physical_names(f, field_data)\n elif environ == \"Nodes\":\n points, point_tags = _read_nodes(f, is_ascii, data_size)\n elif environ == \"Elements\":\n has_additional_tag_data, cell_tags = _read_cells(\n f, cells, point_tags, is_ascii\n )\n elif environ == \"Periodic\":\n periodic = _read_periodic(f)\n elif environ == \"NodeData\":\n _read_data(f, \"NodeData\", point_data, data_size, is_ascii)\n elif environ == \"ElementData\":\n _read_data(f, \"ElementData\", cell_data_raw, data_size, is_ascii)\n else:\n # skip environment\n while line != \"$End\" + environ:\n line = f.readline().decode(\"utf-8\").strip()\n\n if has_additional_tag_data:\n logging.warning(\"The file contains tag data that couldn't be processed.\")\n\n cell_data = cell_data_from_raw(cells, cell_data_raw)\n\n # merge cell_tags into cell_data\n for name, tag_dict in cell_tags.items():\n if name not in cell_data:\n cell_data[name] = []\n for 
cell_type, _ in cells:\n tags = tag_dict.get(cell_type, [])\n cell_data[name].append(tags)\n\n return Mesh(\n points,\n cells,\n point_data=point_data,\n cell_data=cell_data,\n field_data=field_data,\n gmsh_periodic=periodic,\n )\n\n\ndef _read_nodes(f, is_ascii, data_size):\n # The first line is the number of nodes\n line = f.readline().decode(\"utf-8\")\n num_nodes = int(line)\n if is_ascii:\n points = numpy.fromfile(f, count=num_nodes * 4, sep=\" \").reshape((num_nodes, 4))\n # The first number is the index\n point_tags = points[:, 0]\n points = points[:, 1:]\n else:\n # binary\n dtype = [(\"index\", c_int), (\"x\", c_double, (3,))]\n data = numpy.fromfile(f, count=num_nodes, dtype=dtype)\n if not (data[\"index\"] == range(1, num_nodes + 1)).all():\n raise ReadError()\n points = numpy.ascontiguousarray(data[\"x\"])\n point_tags = data[\"index\"]\n\n # Fast forward to $EndNodes\n line = f.readline().decode(\"utf-8\")\n while line.strip() != \"$EndNodes\":\n line = f.readline().decode(\"utf-8\")\n return points, point_tags\n\n\ndef _read_cells(f, cells, point_tags, is_ascii):\n # The first line is the number of elements\n line = f.readline().decode(\"utf-8\")\n total_num_cells = int(line)\n has_additional_tag_data = False\n cell_tags = {}\n if is_ascii:\n _read_cells_ascii(f, cells, cell_tags, total_num_cells)\n else:\n _read_cells_binary(f, cells, cell_tags, total_num_cells)\n cells[:] = _gmsh_to_meshio_order(cells)\n\n point_tags = numpy.asarray(point_tags, dtype=numpy.int32) - 1\n remap = -numpy.ones((numpy.max(point_tags) + 1,), dtype=numpy.int32)\n remap[point_tags] = numpy.arange(point_tags.shape[0])\n\n for ic, (ct, cd) in enumerate(cells):\n cells[ic] = (ct, remap[cd])\n\n # Fast forward to $EndElements\n line = f.readline().decode(\"utf-8\")\n while line.strip() != \"$EndElements\":\n line = f.readline().decode(\"utf-8\")\n\n # restrict to the standard two data items (physical, geometrical)\n output_cell_tags = {\"gmsh:physical\": {}, 
\"gmsh:geometrical\": {}}\n for cell_type in cell_tags:\n physical = []\n geometrical = []\n for item in cell_tags[cell_type]:\n if len(item) > 0:\n physical.append(item[0])\n if len(item) > 1:\n geometrical.append(item[1])\n if len(item) > 2:\n has_additional_tag_data = True\n physical = numpy.array(physical, dtype=c_int)\n geometrical = numpy.array(geometrical, dtype=c_int)\n if len(physical) > 0:\n output_cell_tags[\"gmsh:physical\"][cell_type] = physical\n if len(geometrical) > 0:\n output_cell_tags[\"gmsh:geometrical\"][cell_type] = geometrical\n\n return has_additional_tag_data, output_cell_tags\n\n\ndef _read_cells_ascii(f, cells, cell_tags, total_num_cells):\n for _ in range(total_num_cells):\n line = f.readline().decode(\"utf-8\")\n data = [int(k) for k in filter(None, line.split())]\n t = _gmsh_to_meshio_type[data[1]]\n num_nodes_per_elem = num_nodes_per_cell[t]\n\n if len(cells) == 0 or t != cells[-1][0]:\n cells.append((t, []))\n cells[-1][1].append(data[-num_nodes_per_elem:])\n\n # data[2] gives the number of tags. The gmsh manual\n # <http://gmsh.info/doc/texinfo/gmsh.html#MSH-ASCII-file-format>\n # says:\n # >>>\n # By default, the first tag is the number of the physical entity to which the\n # element belongs; the second is the number of the elementary geometrical entity\n # to which the element belongs; the third is the number of mesh partitions to\n # which the element belongs, followed by the partition ids (negative partition\n # ids indicate ghost cells). A zero tag is equivalent to no tag. 
Gmsh and most\n # codes using the MSH 2 format require at least the first two tags (physical and\n # elementary tags).\n # <<<\n num_tags = data[2]\n if t not in cell_tags:\n cell_tags[t] = []\n cell_tags[t].append(data[3 : 3 + num_tags])\n\n # convert to numpy arrays\n # Subtract one to account for the fact that python indices are 0-based.\n for k, c in enumerate(cells):\n cells[k] = (c[0], numpy.array(c[1], dtype=c_int) - 1)\n # Cannot convert cell_tags[key] to numpy array: There may be a different number of\n # tags for each cell.\n\n\ndef _read_cells_binary(f, cells, cell_tags, total_num_cells):\n num_elems = 0\n while num_elems < total_num_cells:\n # read element header\n elem_type, num_elems0, num_tags = numpy.fromfile(f, count=3, dtype=c_int)\n t = _gmsh_to_meshio_type[elem_type]\n num_nodes_per_elem = num_nodes_per_cell[t]\n\n # read element data\n shape = (num_elems0, 1 + num_tags + num_nodes_per_elem)\n count = shape[0] * shape[1]\n data = numpy.fromfile(f, count=count, dtype=c_int).reshape(shape)\n\n if len(cells) == 0 or t != cells[-1][0]:\n cells.append((t, []))\n cells[-1][1].append(data[:, -num_nodes_per_elem:])\n\n if t not in cell_tags:\n cell_tags[t] = []\n cell_tags[t].append(data[:, 1 : num_tags + 1])\n\n num_elems += num_elems0\n\n # collect cells\n for k, c in enumerate(cells):\n cells[k] = (c[0], numpy.vstack(c[1]) - 1)\n\n # collect cell tags\n for key in cell_tags:\n cell_tags[key] = numpy.vstack(cell_tags[key])\n\n\ndef _read_periodic(f):\n periodic = []\n num_periodic = int(f.readline().decode(\"utf-8\"))\n for _ in range(num_periodic):\n line = f.readline().decode(\"utf-8\")\n edim, stag, mtag = [int(s) for s in line.split()]\n line = f.readline().decode(\"utf-8\").strip()\n if line.startswith(\"Affine\"):\n affine = line.replace(\"Affine\", \"\", 1)\n affine = numpy.fromstring(affine, float, sep=\" \")\n num_nodes = int(f.readline().decode(\"utf-8\"))\n else:\n affine = None\n num_nodes = int(line)\n slave_master = []\n for _ in 
range(num_nodes):\n line = f.readline().decode(\"utf-8\")\n snode, mnode = [int(s) for s in line.split()]\n slave_master.append([snode, mnode])\n slave_master = numpy.array(slave_master, dtype=c_int).reshape(-1, 2)\n slave_master -= 1 # Subtract one, Python is 0-based\n periodic.append([edim, (stag, mtag), affine, slave_master])\n line = f.readline().decode(\"utf-8\")\n if line.strip() != \"$EndPeriodic\":\n raise ReadError()\n return periodic\n\n\ndef write(filename, mesh, float_fmt=\".15e\", binary=True):\n \"\"\"Writes msh files, cf.\n <http://gmsh.info//doc/texinfo/gmsh.html#MSH-ASCII-file-format>.\n \"\"\"\n if mesh.points.shape[1] == 2:\n logging.warning(\n \"msh2 requires 3D points, but 2D points given. \"\n \"Appending 0 third component.\"\n )\n mesh.points = numpy.column_stack(\n [mesh.points[:, 0], mesh.points[:, 1], numpy.zeros(mesh.points.shape[0])]\n )\n\n if binary:\n for k, (key, value) in enumerate(mesh.cells):\n if value.dtype != c_int:\n logging.warning(\n \"Binary Gmsh needs 32-bit integers (got %s). 
Converting.\",\n value.dtype,\n )\n mesh.cells[k] = CellBlock(key, numpy.array(value, dtype=c_int))\n\n cells = _meshio_to_gmsh_order(mesh.cells)\n\n with open(filename, \"wb\") as fh:\n mode_idx = 1 if binary else 0\n size_of_double = 8\n fh.write(\n \"$MeshFormat\\n2.2 {} {}\\n\".format(mode_idx, size_of_double).encode(\"utf-8\")\n )\n if binary:\n numpy.array([1], dtype=c_int).tofile(fh)\n fh.write(b\"\\n\")\n fh.write(b\"$EndMeshFormat\\n\")\n\n if mesh.field_data:\n _write_physical_names(fh, mesh.field_data)\n\n # Split the cell data: gmsh:physical and gmsh:geometrical are tags, the rest is\n # actual cell data.\n tag_data = {}\n other_data = {}\n for key, d in mesh.cell_data.items():\n if key in [\"gmsh:physical\", \"gmsh:geometrical\", \"cell_tags\"]:\n tag_data[key] = d\n else:\n other_data[key] = d\n\n _write_nodes(fh, mesh.points, float_fmt, binary)\n _write_elements(fh, cells, tag_data, binary)\n if mesh.gmsh_periodic is not None:\n _write_periodic(fh, mesh.gmsh_periodic)\n for name, dat in mesh.point_data.items():\n _write_data(fh, \"NodeData\", name, dat, binary)\n\n cell_data_raw = raw_from_cell_data(other_data)\n for name, dat in cell_data_raw.items():\n _write_data(fh, \"ElementData\", name, dat, binary)\n\n\ndef _write_nodes(fh, points, float_fmt, binary):\n fh.write(b\"$Nodes\\n\")\n fh.write(\"{}\\n\".format(len(points)).encode(\"utf-8\"))\n if binary:\n dtype = [(\"index\", c_int), (\"x\", c_double, (3,))]\n tmp = numpy.empty(len(points), dtype=dtype)\n tmp[\"index\"] = 1 + numpy.arange(len(points))\n tmp[\"x\"] = points\n tmp.tofile(fh)\n fh.write(b\"\\n\")\n else:\n fmt = \"{} \" + \" \".join(3 * [\"{:\" + float_fmt + \"}\"]) + \"\\n\"\n for k, x in enumerate(points):\n fh.write(fmt.format(k + 1, x[0], x[1], x[2]).encode(\"utf-8\"))\n fh.write(b\"$EndNodes\\n\")\n\n\ndef _write_elements(fh, cells, tag_data, binary):\n # write elements\n fh.write(b\"$Elements\\n\")\n # count all cells\n total_num_cells = sum([c.shape[0] for _, c in cells])\n 
fh.write(\"{}\\n\".format(total_num_cells).encode(\"utf-8\"))\n\n consecutive_index = 0\n for k, (cell_type, node_idcs) in enumerate(cells):\n tags = []\n for name in [\"gmsh:physical\", \"gmsh:geometrical\", \"cell_tags\"]:\n try:\n tags.append(tag_data[name][k])\n except KeyError:\n pass\n fcd = numpy.concatenate([tags]).astype(c_int).T\n\n if len(fcd) == 0:\n fcd = numpy.empty((len(node_idcs), 0), dtype=c_int)\n\n if binary:\n # header\n header = [_meshio_to_gmsh_type[cell_type], node_idcs.shape[0], fcd.shape[1]]\n numpy.array(header, dtype=c_int).tofile(fh)\n # actual data\n a = numpy.arange(len(node_idcs), dtype=c_int)[:, numpy.newaxis]\n a += 1 + consecutive_index\n array = numpy.hstack([a, fcd, node_idcs + 1])\n if array.dtype != c_int:\n raise WriteError(\n \"Wrong dtype (require c_int, got {})\".format(array.dtype)\n )\n array.tofile(fh)\n else:\n form = (\n \"{} \"\n + str(_meshio_to_gmsh_type[cell_type])\n + \" \"\n + str(fcd.shape[1])\n + \" {} {}\\n\"\n )\n for k, c in enumerate(node_idcs):\n fh.write(\n form.format(\n consecutive_index + k + 1,\n \" \".join([str(val) for val in fcd[k]]),\n # a bit clumsy for `c+1`, but if c is uint64, c+1 is float64\n \" \".join([str(cc) for cc in c + numpy.array(1, dtype=c.dtype)]),\n ).encode(\"utf-8\")\n )\n\n consecutive_index += len(node_idcs)\n if binary:\n fh.write(b\"\\n\")\n fh.write(b\"$EndElements\\n\")\n\n\ndef _write_periodic(fh, periodic):\n fh.write(b\"$Periodic\\n\")\n fh.write(\"{}\\n\".format(len(periodic)).encode(\"utf-8\"))\n for dim, (stag, mtag), affine, slave_master in periodic:\n fh.write(\"{} {} {}\\n\".format(dim, stag, mtag).encode(\"utf-8\"))\n if affine is not None:\n fh.write(b\"Affine \")\n affine = numpy.array(affine, dtype=float)\n affine = numpy.atleast_2d(affine.ravel())\n numpy.savetxt(fh, affine, \"%.16g\")\n slave_master = numpy.array(slave_master, dtype=c_int).reshape(-1, 2)\n slave_master = slave_master + 1 # Add one, Gmsh is 0-based\n 
fh.write(\"{}\\n\".format(len(slave_master)).encode(\"utf-8\"))\n for snode, mnode in slave_master:\n fh.write(\"{} {}\\n\".format(snode, mnode).encode(\"utf-8\"))\n fh.write(b\"$EndPeriodic\\n\")\n"
] |
[
[
"numpy.arange",
"numpy.array"
],
[
"numpy.hstack",
"numpy.fromfile",
"numpy.ascontiguousarray",
"numpy.arange",
"numpy.asarray",
"numpy.dtype",
"numpy.concatenate",
"numpy.max",
"numpy.fromstring",
"numpy.savetxt",
"numpy.array",
"numpy.zeros",
"numpy.vstack"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
ai-erorr404/opencv-practice
|
[
"60ef5e4aec61ee5f7e675fb919e8f612e59f664a",
"60ef5e4aec61ee5f7e675fb919e8f612e59f664a"
] |
[
"workshops/12-section/2-clazz.py",
"workshops/13-section/7-clazz.py"
] |
[
"#!/usr/bin/env python3\r\n# -*- coding=utf-8 -*-\r\n\r\nimport cv2 as cv\r\nimport numpy as np\r\n\r\n\"\"\"\r\nOpencv DNN 实现图像分类\r\n 使用ImageNet数据集支持1000分类的GoogleNet网络模型,其中label标签是在一个单独的文本文件中读取\r\n \r\n读取模型的API:\r\n cv.dnn.readNetFromCaffe(prototxt, caffeModel)\r\n - prototxt 模型配置文件\r\n - caffeModel 模型的权重二进制文件\r\n\r\n使用模型实现预测的时候,需要读取图像作为输入,网络模型支持的输入数据是四维的输入,所以要把读取到的Mat对象转换为四维张量,OpenCV的提供的API如\r\n下:\r\n cv.dnn.blobFromImage(image, scalefactor, size, mean, swapRB, crop, ddepth)\r\n - image 输入图像\r\n - scalefactor 缩放比列,默认1.0 \r\n - size 网络接受的数据大小\r\n - mean 训练时数据集的均值\r\n - swapRB 是否互换Red与Blur通道\r\n - crop 剪切\r\n - ddepth 数据类型\r\n \r\nps: (模型说明)[https://github.com/opencv/opencv/tree/master/samples/dnn]\r\n\r\nOpenCV可以设置计算机后台与计算目标设备,相关API如下\r\n cv.dnn.setPreferableBackend(backendId)\r\n - backendId 后台计算id DNN_BACKEND_DEFAULT (DNN_BACKEND_INFERENCE_ENGINE)表示默认使用intel的预测推断库\r\n (需要下载安装Intel® OpenVINO™ toolkit, 然后重新编译OpenCV源码,在CMake时候enable该选项方可), 可加速计算!\r\n DNN_BACKEND_OPENCV 一般情况都是使用opencv dnn作为后台计算\r\n \r\n cv.dnn.net.setPreferableTarget(targetId)\r\n - targetId 目标设备ID\r\n \r\n常见的目标设备id如下:\r\n -\tDNN_TARGET_CPU其中表示使用CPU计算,默认是的\r\n -\tDNN_TARGET_OPENCL 表示使用OpenCL加速,一般情况速度都很扯\r\n -\tDNN_TARGET_OPENCL_FP16 可以尝试\r\n -\tDNN_TARGET_MYRIAD 树莓派上的\r\n\r\n关系图\r\n | | DNN_BACKEND_OPENCV | DNN_BACKEND_INFERENCE_ENGINE | DNN_BACKEND_HALIDE |\r\n*|------------------------|--------------------|------------------------------|--------------------|\r\n*| DNN_TARGET_CPU | + | + | + |\r\n*| DNN_TARGET_OPENCL | + | + | + |\r\n*| DNN_TARGET_OPENCL_FP16 | + | + | |\r\n*| DNN_TARGET_MYRIAD | | + | |\r\n*| DNN_TARGET_FPGA | | + | |\r\n\"\"\"\r\n\r\nbin_model = \"../../../raspberry-auto/models/googlenet/bvlc_googlenet.caffemodel\"\r\nprototxt = \"../../../raspberry-auto/models/googlenet/bvlc_googlenet.prototxt\"\r\nclasses_path = \"../../../raspberry-auto/models/googlenet/classification_classes_ILSVRC2012.txt\"\r\n\r\n\r\ndef main():\r\n classes = None\r\n with open(classes_path, \"rt\") as 
f:\r\n classes = f.read().rstrip(\"\\n\").split(\"\\n\")\r\n net = cv.dnn.readNetFromCaffe(prototxt, bin_model)\r\n image = cv.imread(\"../../../raspberry-auto/pic/Meter.jpg\")\r\n blob = cv.dnn.blobFromImage(image, 1.0, (224, 224), (104, 117, 123), False, crop=False)\r\n result = np.copy(image)\r\n cv.imshow(\"src\", image)\r\n net.setInput(blob)\r\n # 设置目标设备ID与后台计算ID\r\n net.setPreferableBackend(cv.dnn.DNN_BACKEND_OPENCV)\r\n # 默认为CPU计算,且不进行后台计算ID设置\r\n net.setPreferableTarget(cv.dnn.DNN_TARGET_OPENCL)\r\n\r\n out = net.forward()\r\n out = out.flatten()\r\n classId = np.argmax(out)\r\n confidence = out[classId]\r\n t, _ = net.getPerfProfile()\r\n label = \"Inference time: %.2f ms\" % (t * 1000.0 / cv.getTickFrequency())\r\n print(label)\r\n cv.putText(result, label, (0, 15), cv.FONT_HERSHEY_SCRIPT_SIMPLEX, 0.5, (255, 0, 0))\r\n label = \"%s: %.4f\" % (classes[classId] if classes else \"Class #%d\" % classId, confidence)\r\n print(label)\r\n cv.putText(result, label, (0, 50), cv.FONT_HERSHEY_SCRIPT_SIMPLEX, 0.75, (0, 0, 255), 2)\r\n cv.imshow(\"result\", result)\r\n cv.waitKey(0)\r\n\r\n\r\nif \"__main__\" == __name__:\r\n main()\r\n cv.destroyAllWindows()\r\n",
"#!/usr/bin/env python3\n# -*- coding=utf-8 -*-\n\nimport tensorflow as tf\n\n\"\"\"\nTensorFlow - hello world\n\n使用安装的TensorFlow 2.0并导入\n\"\"\"\n\n\ndef main():\n # 导入数据集, 数据集下载地址为: http://yann.lecun.com/exdb/mnist/\n mnist = tf.keras.datasets.mnist\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n # 将整数数据集转换为浮点数\n x_train, x_test = x_train / 255.0, x_test / 255.0\n # 搭建Sequential模型,并将数据堆叠起来\n model = tf.keras.models.Sequential([\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation='relu'),\n tf.keras.layers.Dropout(0.2),\n tf.keras.layers.Dense(10, activation='softmax')\n ])\n model.compile(optimizer='adam',\n loss='sparse_categorical_crossentropy',\n metrics=['accuracy'])\n # 训练\n model.fit(x_train, y_train, epochs=5)\n # 验证\n model.evaluate(x_test, y_test)\n\n\nif \"__main__\" == __name__:\n main()\n"
] |
[
[
"numpy.copy",
"numpy.argmax"
],
[
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Dropout"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"2.6",
"2.4",
"2.3",
"2.5",
"2.2"
]
}
] |
UCD4IDS/sage
|
[
"43474c96d533fd396fe29fe0782d44dc7f5164f7"
] |
[
"src/sage/plot/graphics.py"
] |
[
"# -*- encoding: utf-8 -*-\nr\"\"\"\nGraphics objects\n\nThis file contains the definition of the class :class:`Graphics`.\nUsually, you don't call the constructor of this class directly\n(although you can do it), you would use :func:`plot` instead.\n\nAUTHORS:\n\n- Jeroen Demeyer (2012-04-19): split off this file from plot.py (:trac:`12857`)\n\n- Punarbasu Purkayastha (2012-05-20): Add logarithmic scale (:trac:`4529`)\n\n- Emily Chen (2013-01-05): Add documentation for\n :meth:`~sage.plot.graphics.Graphics.show` figsize parameter (:trac:`5956`)\n\n- Eric Gourgoulhon (2015-03-19): Add parameter axes_labels_size (:trac:`18004`)\n\n- Eric Gourgoulhon (2019-05-24): :class:`~sage.plot.multigraphics.GraphicsArray`\n moved to new module :mod:`~sage.plot.multigraphics`; various improvements and\n fixes in :meth:`Graphics.matplotlib` and ``Graphics._set_scale``; new method\n :meth:`Graphics.inset`\n\n\"\"\"\n\n# ****************************************************************************\n# Copyright (C) 2006 Alex Clemesha <[email protected]>\n# Copyright (C) 2006-2008 William Stein <[email protected]>\n# Copyright (C) 2010 Jason Grout\n#\n# Distributed under the terms of the GNU General Public License (GPL)\n# as published by the Free Software Foundation; either version 2 of\n# the License, or (at your option) any later version.\n# https://www.gnu.org/licenses/\n# ****************************************************************************\n\nimport os\nfrom collections.abc import Iterable\nfrom math import isnan\nimport sage.misc.verbose\nfrom sage.misc.temporary_file import tmp_filename\nfrom sage.misc.fast_methods import WithEqualityById\nfrom sage.structure.sage_object import SageObject\nfrom sage.misc.decorators import suboptions\nfrom .colors import rgbcolor\n\nALLOWED_EXTENSIONS = ['.eps', '.pdf', '.pgf', '.png', '.ps', '.sobj', '.svg']\nDEFAULT_DPI = 100\n\n\n# If do_verify is True, options are checked when drawing a\n# GraphicsPrimitive. 
See primitive.py\ndo_verify = True\n\n\ndef is_Graphics(x):\n \"\"\"\n Return True if `x` is a Graphics object.\n\n EXAMPLES::\n\n sage: from sage.plot.graphics import is_Graphics\n sage: is_Graphics(1)\n False\n sage: is_Graphics(disk((0.0, 0.0), 1, (0, pi/2)))\n True\n \"\"\"\n return isinstance(x, Graphics)\n\n\ndef _parse_figsize(figsize):\n r\"\"\"\n Helper function to get a figure size in matplotlib format.\n\n INPUT:\n\n - ``figsize`` -- width or [width, height] in inches; if only the width is\n provided, the height is computed from matplotlib's default aspect ratio\n\n OUTPUT:\n\n - a pair of ``float``'s representing ``(width, height)``\n\n EXAMPLES::\n\n sage: from sage.plot.graphics import _parse_figsize\n sage: _parse_figsize([5, 4])\n (5.0, 4.0)\n\n The default aspect ratio is 4/3::\n\n sage: _parse_figsize(5) # tol 1.0e-13\n (5.0, 3.75)\n\n \"\"\"\n from matplotlib import rcParams\n if isinstance(figsize, (list, tuple)):\n # figsize should be a pair of positive numbers\n if len(figsize) != 2:\n raise ValueError(\"figsize should be a positive number or a list \"\n \"of two positive numbers, not {0}\".format(figsize))\n figsize = (float(figsize[0]), float(figsize[1])) # floats for mpl\n if not (figsize[0] > 0 and figsize[1] > 0):\n raise ValueError(\"figsize should be positive numbers, \"\n \"not {0} and {1}\".format(figsize[0], figsize[1]))\n else:\n # in this case, figsize is a single number representing the width and\n # should be positive\n try:\n figsize = float(figsize) # to pass to mpl\n except TypeError:\n raise TypeError(\"figsize should be a positive number, not {0}\".format(figsize))\n if figsize > 0:\n default_width, default_height = rcParams['figure.figsize']\n figsize = (figsize, default_height * figsize / default_width)\n else:\n raise ValueError(\"figsize should be positive, not {0}\".format(figsize))\n return figsize\n\n\nclass Graphics(WithEqualityById, SageObject):\n \"\"\"\n The Graphics object is an empty list of graphics objects. 
It is\n useful to use this object when initializing a for loop where\n different graphics object will be added to the empty object.\n\n EXAMPLES::\n\n sage: G = Graphics(); print(G)\n Graphics object consisting of 0 graphics primitives\n sage: c = circle((1,1), 1)\n sage: G+=c; print(G)\n Graphics object consisting of 1 graphics primitive\n\n Here we make a graphic of embedded isosceles triangles, coloring\n each one with a different color as we go::\n\n sage: h=10; c=0.4; p=0.5\n sage: G = Graphics()\n sage: for x in srange(1,h+1):\n ....: l = [[0,x*sqrt(3)],[-x/2,-x*sqrt(3)/2],[x/2,-x*sqrt(3)/2],[0,x*sqrt(3)]]\n ....: G+=line(l,color=hue(c + p*(x/h)))\n sage: G.show(figsize=[5,5])\n\n We can change the scale of the axes in the graphics before displaying.::\n\n sage: G = plot(exp, 1, 10) # long time\n sage: G.show(scale='semilogy') # long time\n\n TESTS:\n\n From :trac:`4604`, ensure Graphics can handle 3d objects::\n\n sage: g = Graphics()\n sage: g += sphere((1, 1, 1), 2)\n sage: g.show()\n\n We check that graphics can be pickled (we can't use equality on\n graphics so we just check that the load/dump cycle gives a\n :class:`Graphics` instance)::\n\n sage: g = Graphics()\n sage: g2 = loads(dumps(g))\n sage: g2.show()\n\n ::\n\n sage: isinstance(g2, Graphics)\n True\n\n sage: hash(Graphics()) # random\n 42\n\n .. 
automethod:: _rich_repr_\n \"\"\"\n\n def __init__(self):\n \"\"\"\n Create a new empty Graphics objects with all the defaults.\n\n EXAMPLES::\n\n sage: G = Graphics()\n \"\"\"\n self._axes_color = (0, 0, 0)\n self._axes_label_color = (0, 0, 0)\n self._axes_width = 0.8\n self._bbox_extra_artists = []\n self._extra_kwds = {}\n self._fontsize = 10\n self._axes_labels_size = 1.6\n self._legend_colors = []\n self._legend_opts = {}\n self._objects = []\n self._show_axes = True\n self._show_legend = False\n self._tick_label_color = (0, 0, 0)\n\n def set_aspect_ratio(self, ratio):\n \"\"\"\n Set the aspect ratio, which is the ratio of height and width\n of a unit square (i.e., height/width of a unit square), or\n 'automatic' (expand to fill the figure).\n\n INPUT:\n\n\n - ``ratio`` - a positive real number or 'automatic'\n\n\n EXAMPLES: We create a plot of the upper half of a circle, but it\n doesn't look round because the aspect ratio is off::\n\n sage: P = plot(sqrt(1-x^2),(x,-1,1)); P\n Graphics object consisting of 1 graphics primitive\n\n So we set the aspect ratio and now it is round::\n\n sage: P.set_aspect_ratio(1)\n sage: P.aspect_ratio()\n 1.0\n sage: P\n Graphics object consisting of 1 graphics primitive\n\n Note that the aspect ratio is inherited upon addition (which takes\n the max of aspect ratios of objects whose aspect ratio has been\n set)::\n\n sage: P + plot(sqrt(4-x^2),(x,-2,2))\n Graphics object consisting of 2 graphics primitives\n\n In the following example, both plots produce a circle that looks\n twice as tall as wide::\n\n sage: Q = circle((0,0), 0.5); Q.set_aspect_ratio(2)\n sage: (P + Q).aspect_ratio(); P+Q\n 2.0\n Graphics object consisting of 2 graphics primitives\n sage: (Q + P).aspect_ratio(); Q+P\n 2.0\n Graphics object consisting of 2 graphics primitives\n \"\"\"\n if ratio != 'auto' and ratio != 'automatic':\n ratio = float(ratio)\n if ratio <= 0:\n raise ValueError(\"the aspect ratio must be positive or 'automatic'\")\n else:\n ratio = 
'automatic'\n self._extra_kwds['aspect_ratio'] = ratio\n\n def aspect_ratio(self):\n \"\"\"\n Get the current aspect ratio, which is the ratio of height to\n width of a unit square, or 'automatic'.\n\n OUTPUT: a positive float (height/width of a unit square), or 'automatic'\n (expand to fill the figure).\n\n EXAMPLES:\n\n The default aspect ratio for a new blank Graphics object is 'automatic'::\n\n sage: P = Graphics()\n sage: P.aspect_ratio()\n 'automatic'\n\n The aspect ratio can be explicitly set different than the object's default::\n\n sage: P = circle((1,1), 1)\n sage: P.aspect_ratio()\n 1.0\n sage: P.set_aspect_ratio(2)\n sage: P.aspect_ratio()\n 2.0\n sage: P.set_aspect_ratio('automatic')\n sage: P.aspect_ratio()\n 'automatic'\n \"\"\"\n return self._extra_kwds.get('aspect_ratio', 'automatic')\n\n def legend(self, show=None):\n r\"\"\"\n Set whether or not the legend is shown by default.\n\n INPUT:\n\n - ``show`` - (default: None) a boolean\n\n If called with no input, return the current legend setting.\n\n EXAMPLES:\n\n By default no legend is displayed::\n\n sage: P = plot(sin)\n sage: P.legend()\n False\n\n But if we put a label then the legend is shown::\n\n sage: P = plot(sin, legend_label='sin')\n sage: P.legend()\n True\n\n We can turn it on or off::\n\n sage: P.legend(False)\n sage: P.legend()\n False\n sage: P.legend(True)\n sage: P # show with the legend\n Graphics object consisting of 1 graphics primitive\n \"\"\"\n if show is None:\n return self._show_legend\n else:\n self._show_legend = bool(show)\n\n def set_legend_options(self, **kwds):\n r\"\"\"\n Set various legend options.\n\n INPUT:\n\n - ``title`` - (default: None) string, the legend title\n\n - ``ncol`` - (default: 1) positive integer, the number of columns\n\n - ``columnspacing`` - (default: None) the spacing between columns\n\n - ``borderaxespad`` - (default: None) float, length between the axes and the legend\n\n - ``back_color`` - (default: 'white') This parameter can be a string\n 
denoting a color or an RGB tuple. The string can be a color name\n as in ('red', 'green', 'yellow', ...) or a floating point number\n like '0.8' which gets expanded to (0.8, 0.8, 0.8). The\n tuple form is just a floating point RGB tuple with all values ranging\n from 0 to 1.\n\n - ``handlelength`` - (default: 0.05) float, the length of the legend handles\n\n - ``handletextpad`` - (default: 0.5) float, the pad between the legend handle and text\n\n - ``labelspacing`` - (default: 0.02) float, vertical space between legend entries\n\n - ``loc`` - (default: 'best') May be a string, an integer or a tuple. String or\n integer inputs must be one of the following:\n\n - 0, 'best'\n\n - 1, 'upper right'\n\n - 2, 'upper left'\n\n - 3, 'lower left'\n\n - 4, 'lower right'\n\n - 5, 'right'\n\n - 6, 'center left'\n\n - 7, 'center right'\n\n - 8, 'lower center'\n\n - 9, 'upper center'\n\n - 10, 'center'\n\n - Tuple arguments represent an absolute (x, y) position on the plot\n in axes coordinates (meaning from 0 to 1 in each direction).\n\n - ``markerscale`` - (default: 0.6) float, how much to scale the markers in the legend.\n\n - ``numpoints`` - (default: 2) integer, the number of points in the legend for line\n\n - ``borderpad`` - (default: 0.6) float, the fractional whitespace inside the legend border\n (between 0 and 1)\n\n - ``font_family`` - (default: 'sans-serif') string, one of 'serif', 'sans-serif',\n 'cursive', 'fantasy', 'monospace'\n\n - ``font_style`` - (default: 'normal') string, one of 'normal', 'italic', 'oblique'\n\n - ``font_variant`` - (default: 'normal') string, one of 'normal', 'small-caps'\n\n - ``font_weight`` - (default: 'medium') string, one of 'black', 'extra bold', 'bold',\n 'semibold', 'medium', 'normal', 'light'\n\n - ``font_size`` - (default: 'medium') string, one of 'xx-small', 'x-small', 'small',\n 'medium', 'large', 'x-large', 'xx-large' or an absolute font size (e.g. 
12)\n\n - ``shadow`` - (default: True) boolean - draw a shadow behind the legend\n\n - ``fancybox`` - (default: False) a boolean. If True, draws a frame with a round\n fancybox.\n\n These are all keyword arguments.\n\n OUTPUT: a dictionary of all current legend options\n\n EXAMPLES:\n\n By default, no options are set::\n\n sage: p = plot(tan, legend_label='tan')\n sage: p.set_legend_options()\n {}\n\n We build a legend without a shadow::\n\n sage: p.set_legend_options(shadow=False)\n sage: p.set_legend_options()['shadow']\n False\n\n To set the legend position to the center of the plot, all these\n methods are roughly equivalent::\n\n sage: p.set_legend_options(loc='center'); p\n Graphics object consisting of 1 graphics primitive\n\n ::\n\n sage: p.set_legend_options(loc=10); p\n Graphics object consisting of 1 graphics primitive\n\n ::\n\n sage: p.set_legend_options(loc=(0.5,0.5)); p # aligns the bottom of the box to the center\n Graphics object consisting of 1 graphics primitive\n \"\"\"\n if len(kwds) == 0:\n return self._legend_opts\n else:\n self._legend_opts.update(kwds)\n\n def get_axes_range(self):\n \"\"\"\n Returns a dictionary of the range of the axes for this graphics\n object. This is fall back to the ranges in get_minmax_data() for\n any value which the user has not explicitly set.\n\n .. warning::\n\n Changing the dictionary returned by this function does not\n change the axes range for this object. 
To do that, use the\n :meth:`set_axes_range` method.\n\n EXAMPLES::\n\n sage: L = line([(1,2), (3,-4), (2, 5), (1,2)])\n sage: list(sorted(L.get_axes_range().items()))\n [('xmax', 3.0), ('xmin', 1.0), ('ymax', 5.0), ('ymin', -4.0)]\n sage: L.set_axes_range(xmin=-1)\n sage: list(sorted(L.get_axes_range().items()))\n [('xmax', 3.0), ('xmin', -1.0), ('ymax', 5.0), ('ymin', -4.0)]\n \"\"\"\n axes_range = self.get_minmax_data()\n axes_range.update(self._get_axes_range_dict())\n return axes_range\n\n def set_axes_range(self, xmin=None, xmax=None, ymin=None, ymax=None):\n \"\"\"\n Set the ranges of the `x` and `y` axes.\n\n INPUT:\n\n\n - ``xmin, xmax, ymin, ymax`` - floats\n\n\n EXAMPLES::\n\n sage: L = line([(1,2), (3,-4), (2, 5), (1,2)])\n sage: L.set_axes_range(-1, 20, 0, 2)\n sage: d = L.get_axes_range()\n sage: d['xmin'], d['xmax'], d['ymin'], d['ymax']\n (-1.0, 20.0, 0.0, 2.0)\n \"\"\"\n l = locals()\n axes_range = self._get_axes_range_dict()\n for name in ['xmin', 'xmax', 'ymin', 'ymax']:\n if l[name] is not None:\n axes_range[name] = float(l[name])\n\n axes_range = set_axes_range\n\n def _get_axes_range_dict(self):\n \"\"\"\n Returns the underlying dictionary used to store the user's\n custom ranges for the axes on this object.\n\n EXAMPLES::\n\n sage: L = line([(1,2), (3,-4), (2, 5), (1,2)])\n sage: L._get_axes_range_dict()\n {}\n sage: L.set_axes_range(xmin=-1)\n sage: L._get_axes_range_dict()\n {'xmin': -1.0}\n \"\"\"\n try:\n return self._axes_range\n except AttributeError:\n self._axes_range = {}\n return self._axes_range\n\n def set_flip(self, flip_x=None, flip_y=None):\n \"\"\"\n Set the flip options for this graphics object.\n\n INPUT:\n\n - ``flip_x`` -- boolean (default: ``None``); if not ``None``, set the\n ``flip_x`` option to this value\n - ``flip_y`` -- boolean (default: ``None``); if not ``None``, set the\n ``flip_y`` option to this value\n\n EXAMPLES::\n\n sage: L = line([(1, 0), (2, 3)])\n sage: L.set_flip(flip_y=True)\n sage: L.flip()\n (False, 
True)\n sage: L.set_flip(True, False)\n sage: L.flip()\n (True, False)\n \"\"\"\n if flip_x is not None:\n self._extra_kwds['flip_x'] = flip_x\n if flip_y is not None:\n self._extra_kwds['flip_y'] = flip_y\n\n def flip(self, flip_x=False, flip_y=False):\n \"\"\"\n Get the flip options and optionally mirror this graphics object.\n\n INPUT:\n\n - ``flip_x`` -- boolean (default: ``False``); if ``True``, replace the\n current ``flip_x`` option by its opposite\n - ``flip_y`` -- boolean (default: ``False``); if ``True``, replace the\n current ``flip_y`` option by its opposite\n\n OUTPUT: a tuple containing the new flip options\n\n EXAMPLES:\n\n When called without arguments, this just returns the current flip\n options::\n\n sage: L = line([(1, 0), (2, 3)])\n sage: L.flip()\n (False, False)\n\n Otherwise, the specified options are changed and the new options are\n returned::\n\n sage: L.flip(flip_y=True)\n (False, True)\n sage: L.flip(True, True)\n (True, False)\n \"\"\"\n a = self._extra_kwds.get('flip_x', self.SHOW_OPTIONS['flip_x'])\n b = self._extra_kwds.get('flip_y', self.SHOW_OPTIONS['flip_y'])\n if flip_x:\n a = not a\n self._extra_kwds['flip_x'] = a\n if flip_y:\n b = not b\n self._extra_kwds['flip_y'] = b\n return (a, b)\n\n def fontsize(self, s=None):\n \"\"\"\n Set the font size of axes labels and tick marks.\n\n Note that the relative size of the axes labels font w.r.t. 
the tick\n marks font can be adjusted via :meth:`axes_labels_size`.\n\n INPUT:\n\n\n - ``s`` - integer, a font size in points.\n\n\n If called with no input, return the current fontsize.\n\n EXAMPLES::\n\n sage: L = line([(1,2), (3,-4), (2, 5), (1,2)])\n sage: L.fontsize()\n 10\n sage: L.fontsize(20)\n sage: L.fontsize()\n 20\n\n All the numbers on the axes will be very large in this plot::\n\n sage: L\n Graphics object consisting of 1 graphics primitive\n \"\"\"\n if s is None:\n try:\n return self._fontsize\n except AttributeError:\n self._fontsize = 10\n return self._fontsize\n self._fontsize = int(s)\n\n def axes_labels_size(self, s=None):\n \"\"\"\n Set the relative size of axes labels w.r.t. the axes tick marks.\n\n INPUT:\n\n - ``s`` - float, relative size of axes labels w.r.t. to the tick marks,\n the size of the tick marks being set by :meth:`fontsize`.\n\n If called with no input, return the current relative size.\n\n EXAMPLES::\n\n sage: p = plot(sin(x^2), (x, -3, 3), axes_labels=['$x$','$y$'])\n sage: p.axes_labels_size() # default value\n 1.6\n sage: p.axes_labels_size(2.5)\n sage: p.axes_labels_size()\n 2.5\n\n Now the axes labels are large w.r.t. 
the tick marks::\n\n sage: p\n Graphics object consisting of 1 graphics primitive\n\n \"\"\"\n if s is None:\n try:\n return self._axes_labels_size\n except AttributeError:\n self._axes_labels_size = 1.6\n return self._axes_labels_size\n self._axes_labels_size = float(s)\n\n def axes(self, show=None):\n \"\"\"\n Set whether or not the `x` and `y` axes are shown\n by default.\n\n INPUT:\n\n\n - ``show`` - bool\n\n\n If called with no input, return the current axes setting.\n\n EXAMPLES::\n\n sage: L = line([(1,2), (3,-4), (2, 5), (1,2)])\n\n By default the axes are displayed.\n\n ::\n\n sage: L.axes()\n True\n\n But we turn them off, and verify that they are off\n\n ::\n\n sage: L.axes(False)\n sage: L.axes()\n False\n\n Displaying L now shows a triangle but no axes.\n\n ::\n\n sage: L\n Graphics object consisting of 1 graphics primitive\n \"\"\"\n if show is None:\n try:\n return self._show_axes\n except AttributeError:\n self._show_axes = True\n return self._show_axes\n self._show_axes = bool(show)\n\n def axes_color(self, c=None):\n \"\"\"\n Set the axes color.\n\n If called with no input, return the current axes_color setting.\n\n INPUT:\n\n\n - ``c`` - an RGB color 3-tuple, where each tuple entry\n is a float between 0 and 1\n\n\n EXAMPLES: We create a line, which has like everything a default\n axes color of black.\n\n ::\n\n sage: L = line([(1,2), (3,-4), (2, 5), (1,2)])\n sage: L.axes_color()\n (0, 0, 0)\n\n We change the axes color to red and verify the change.\n\n ::\n\n sage: L.axes_color((1,0,0))\n sage: L.axes_color()\n (1.0, 0.0, 0.0)\n\n When we display the plot, we'll see a blue triangle and bright red\n axes.\n\n ::\n\n sage: L\n Graphics object consisting of 1 graphics primitive\n \"\"\"\n if c is None:\n try:\n return self._axes_color\n\n except AttributeError:\n self._axes_color = (0.0, 0.0, 0.0)\n return self._axes_color\n self._axes_color = rgbcolor(c)\n\n def axes_labels(self, l=None):\n \"\"\"\n Set the axes labels.\n\n INPUT:\n\n\n - ``l`` - 
(default: None) a list of two strings or\n None\n\n\n OUTPUT: a 2-tuple of strings\n\n If l is None, returns the current ``axes_labels``,\n which is itself by default None. The default labels are both\n empty.\n\n EXAMPLES: We create a plot and put x and y axes labels on it.\n\n ::\n\n sage: p = plot(sin(x), (x, 0, 10))\n sage: p.axes_labels(['$x$','$y$'])\n sage: p.axes_labels()\n ('$x$', '$y$')\n\n Now when you plot p, you see x and y axes labels::\n\n sage: p\n Graphics object consisting of 1 graphics primitive\n\n Notice that some may prefer axes labels which are not\n typeset::\n\n sage: plot(sin(x), (x, 0, 10), axes_labels=['x','y'])\n Graphics object consisting of 1 graphics primitive\n\n TESTS:\n\n Unicode strings are acceptable; see :trac:`13161`. Note that\n this does not guarantee that matplotlib will handle the strings\n properly, although it should.\n\n ::\n\n sage: c = circle((0,0), 1)\n sage: c.axes_labels(['axe des abscisses', 'axe des ordonnées'])\n sage: c._axes_labels\n ('axe des abscisses', 'axe des ordonnées')\n \"\"\"\n if l is None:\n try:\n return self._axes_labels\n except AttributeError:\n self._axes_labels = None\n return self._axes_labels\n if not isinstance(l, (list, tuple)):\n raise TypeError(\"l must be a list or tuple\")\n if len(l) != 2:\n raise ValueError(\"l must have length 2\")\n self._axes_labels = tuple(l)\n\n def axes_label_color(self, c=None):\n r\"\"\"\n Set the color of the axes labels.\n\n The axes labels are placed at the edge of the x and y axes, and are\n not on by default (use the ``axes_labels`` command to\n set them; see the example below). 
This function just changes their\n color.\n\n INPUT:\n\n\n - ``c`` - an RGB 3-tuple of numbers between 0 and 1\n\n\n If called with no input, return the current axes_label_color\n setting.\n\n EXAMPLES: We create a plot, which by default has axes label color\n black.\n\n ::\n\n sage: p = plot(sin, (-1,1))\n sage: p.axes_label_color()\n (0, 0, 0)\n\n We change the labels to be red, and confirm this::\n\n sage: p.axes_label_color((1,0,0))\n sage: p.axes_label_color()\n (1.0, 0.0, 0.0)\n\n We set labels, since otherwise we won't see anything.\n\n ::\n\n sage: p.axes_labels(['$x$ axis', '$y$ axis'])\n\n In the plot below, notice that the labels are red::\n\n sage: p\n Graphics object consisting of 1 graphics primitive\n \"\"\"\n if c is None:\n try:\n return self._axes_label_color\n except AttributeError:\n self._axes_label_color = (0, 0, 0)\n return self._axes_label_color\n self._axes_label_color = rgbcolor(c)\n\n def axes_width(self, w=None):\n r\"\"\"\n Set the axes width. Use this to draw a plot with really fat or\n really thin axes.\n\n INPUT:\n\n\n - ``w`` - a float\n\n\n If called with no input, return the current\n ``axes_width`` setting.\n\n EXAMPLES: We create a plot, see the default axes width (with funny\n Python float rounding), then reset the width to 10 (very fat).\n\n ::\n\n sage: p = plot(cos, (-3,3))\n sage: p.axes_width()\n 0.8\n sage: p.axes_width(10)\n sage: p.axes_width()\n 10.0\n\n Finally we plot the result, which is a graph with very fat axes.\n\n ::\n\n sage: p\n Graphics object consisting of 1 graphics primitive\n \"\"\"\n if w is None:\n try:\n return self._axes_width\n except AttributeError:\n self._axes_width = True\n return self._axes_width\n self._axes_width = float(w)\n\n def tick_label_color(self, c=None):\n \"\"\"\n Set the color of the axes tick labels.\n\n INPUT:\n\n\n - ``c`` - an RGB 3-tuple of numbers between 0 and 1\n\n\n If called with no input, return the current tick_label_color\n setting.\n\n EXAMPLES::\n\n sage: p = 
plot(cos, (-3,3))\n sage: p.tick_label_color()\n (0, 0, 0)\n sage: p.tick_label_color((1,0,0))\n sage: p.tick_label_color()\n (1.0, 0.0, 0.0)\n sage: p\n Graphics object consisting of 1 graphics primitive\n \"\"\"\n if c is None:\n try:\n return self._tick_label_color\n except AttributeError:\n self._tick_label_color = (0, 0, 0)\n return self._tick_label_color\n self._tick_label_color = rgbcolor(c)\n\n def _repr_(self):\n r\"\"\"\n Return a string representation of the graphics objects.\n\n OUTPUT:\n\n String.\n\n EXAMPLES:\n\n We create a plot and call :meth:`show` on it, which causes it\n to be displayed as a plot::\n\n sage: P = plot(cos, (-1,1))\n sage: P.show()\n\n Just doing this also displays the plot::\n\n sage: P\n Graphics object consisting of 1 graphics primitive\n\n Using the Python `repr` or `str` commands do not display the\n plot::\n\n sage: repr(P)\n 'Graphics object consisting of 1 graphics primitive'\n sage: str(P)\n 'Graphics object consisting of 1 graphics primitive'\n sage: print(P)\n Graphics object consisting of 1 graphics primitive\n\n TESTS::\n\n sage: P._repr_()\n 'Graphics object consisting of 1 graphics primitive'\n \"\"\"\n return str(self)\n\n def _rich_repr_(self, display_manager, **kwds):\n \"\"\"\n Rich Output Magic Method\n\n See :mod:`sage.repl.rich_output` for details.\n\n EXAMPLES::\n\n sage: from sage.repl.rich_output import get_display_manager\n sage: dm = get_display_manager()\n sage: g = Graphics()\n sage: g._rich_repr_(dm)\n OutputImagePng container\n \"\"\"\n types = display_manager.types\n prefer_raster = (\n ('.png', types.OutputImagePng),\n ('.jpg', types.OutputImageJpg),\n ('.gif', types.OutputImageGif),\n )\n prefer_vector = (\n ('.svg', types.OutputImageSvg),\n ('.pdf', types.OutputImagePdf),\n )\n graphics = display_manager.preferences.graphics\n if graphics == 'disable':\n return\n elif graphics == 'raster' or graphics is None:\n preferred = prefer_raster + prefer_vector\n elif graphics == 'vector':\n preferred = 
prefer_vector + prefer_raster\n else:\n raise ValueError('unknown graphics output preference')\n for file_ext, output_container in preferred:\n if output_container in display_manager.supported_output():\n return display_manager.graphics_from_save(\n self.save, kwds, file_ext, output_container)\n\n def __str__(self):\n r\"\"\"\n Return string representation of this plot.\n\n OUTPUT:\n\n String.\n\n EXAMPLES::\n\n sage: S = circle((0,0), 2); S.__str__()\n 'Graphics object consisting of 1 graphics primitive'\n sage: str(S)\n 'Graphics object consisting of 1 graphics primitive'\n sage: print(S)\n Graphics object consisting of 1 graphics primitive\n \"\"\"\n s = \"Graphics object consisting of %s graphics primitives\" % (len(self))\n if len(self) == 1:\n s = s[:-1]\n return s\n\n def __getitem__(self, i):\n \"\"\"\n Returns the ith graphics primitive object:\n\n EXAMPLES::\n\n sage: G = circle((1,1),2) + circle((2,2),5); print(G)\n Graphics object consisting of 2 graphics primitives\n sage: G[1]\n Circle defined by (2.0,2.0) with r=5.0\n \"\"\"\n return self._objects[i]\n\n def __len__(self):\n \"\"\"\n If G is of type Graphics, then len(G) gives the number of distinct\n graphics primitives making up that object.\n\n EXAMPLES::\n\n sage: G = circle((1,1),1) + circle((1,2),1) + circle((1,2),5); print(G)\n Graphics object consisting of 3 graphics primitives\n sage: len(G)\n 3\n \"\"\"\n return len(self._objects)\n\n def __delitem__(self, i):\n \"\"\"\n If G is of type Graphics, then del(G[i]) removes the ith distinct\n graphic primitive making up that object.\n\n EXAMPLES::\n\n sage: G = circle((1,1),1) + circle((1,2),1) + circle((1,2),5); print(G)\n Graphics object consisting of 3 graphics primitives\n sage: len(G)\n 3\n sage: del(G[2])\n sage: print(G)\n Graphics object consisting of 2 graphics primitives\n sage: len(G)\n 2\n \"\"\"\n del self._objects[int(i)]\n\n def __setitem__(self, i, x):\n \"\"\"\n You can replace a GraphicPrimitive (point, line, circle, etc...) 
in\n a Graphics object G with any other GraphicPrimitive\n\n EXAMPLES::\n\n sage: G = circle((1,1),1) + circle((1,2),1) + circle((1,2),5); print(G)\n Graphics object consisting of 3 graphics primitives\n\n ::\n\n sage: p = polygon([[1,3],[2,-2],[1,1],[1,3]]); print(p)\n Graphics object consisting of 1 graphics primitive\n\n ::\n\n sage: G[1] = p[0]\n sage: G # show the plot\n Graphics object consisting of 3 graphics primitives\n \"\"\"\n from sage.plot.primitive import GraphicPrimitive\n if not isinstance(x, GraphicPrimitive):\n raise TypeError(\"x must be a GraphicPrimitive\")\n self._objects[int(i)] = x\n\n def __radd__(self, other):\n \"\"\"\n Compute and return other + this graphics object.\n\n This only works when other is a Python int equal to 0. In all other\n cases a TypeError is raised. The main reason for this function is\n to make summing a list of graphics objects easier.\n\n EXAMPLES::\n\n sage: S = circle((0,0), 2)\n sage: print(int(0) + S)\n Graphics object consisting of 1 graphics primitive\n sage: print(S + int(0))\n Graphics object consisting of 1 graphics primitive\n\n The following would fail were it not for this function::\n\n sage: v = [circle((0,0), 2), circle((2,3), 1)]\n sage: print(sum(v))\n Graphics object consisting of 2 graphics primitives\n \"\"\"\n if isinstance(other, int) and other == 0:\n return self\n raise TypeError\n\n def __add__(self, other):\n \"\"\"\n If you have any Graphics object G1, you can always add any other\n amount of Graphics objects G2,G3,... to form a new Graphics object:\n G4 = G1 + G2 + G3.\n\n The xmin, xmax, ymin, and ymax properties of the graphics objects\n are expanded to include all objects in both scenes. If the aspect\n ratio property of either or both objects are set, then the larger\n aspect ratio is chosen, with 'automatic' being overridden by a\n numeric aspect ratio.\n\n If one of the graphics object is set to show a legend, then\n the resulting object will also be set to show a legend. 
Legend\n options are propagated if set. If the same legend option is\n present in both arguments, the latter value is used.\n\n EXAMPLES::\n\n sage: g1 = plot(abs(sqrt(x^3-1)), (x,1,5), frame=True)\n sage: g2 = plot(-abs(sqrt(x^3-1)), (x,1,5), color='red')\n sage: g1 + g2 # displays the plot\n Graphics object consisting of 2 graphics primitives\n\n TESTS:\n\n Extra keywords to show are propagated::\n\n sage: (g1 + g2)._extra_kwds=={'aspect_ratio': 'automatic', 'frame': True}\n True\n sage: g1.set_aspect_ratio(2)\n sage: (g1+g2).aspect_ratio()\n 2.0\n sage: g2.set_aspect_ratio(3)\n sage: (g1+g2).aspect_ratio()\n 3.0\n\n As are legend options, :trac:`12936`::\n\n sage: p1 = plot(x, x, 0, 1)\n sage: p2 = p1\n sage: p1.set_legend_options(back_color = 'black')\n sage: p2.set_legend_options(shadow = False)\n sage: p3 = p1 + p2\n sage: p3._legend_opts\n {'back_color': 'black', 'shadow': False}\n\n If the same legend option is specified more than once, the\n latter takes precedence::\n\n sage: p1 = plot(x, x, 0, 1)\n sage: p2 = p1\n sage: p1.set_legend_options(shadow = True)\n sage: p2.set_legend_options(shadow = False)\n sage: p3 = p1 + p2\n sage: p3._legend_opts\n {'shadow': False}\n\n Flipped axes take precedence over non-flipped axes::\n\n sage: p1 = plot(x, x, 0, 1, flip_x=True, flip_y=True)\n sage: p2 = plot(x^2, x, 0, 1)\n sage: [p._extra_kwds[k] for p in [p1 + p2, p2 + p1] for k in ['flip_x', 'flip_y']]\n [True, True, True, True]\n \"\"\"\n if isinstance(other, int) and other == 0:\n return self\n if not isinstance(other, Graphics):\n from sage.plot.plot3d.base import Graphics3d\n if isinstance(other, Graphics3d):\n return self.plot3d() + other\n raise TypeError(\"other (=%s) must be a Graphics objects\" % other)\n g = Graphics()\n g._objects = self._objects + other._objects\n g._show_legend = self._show_legend or other._show_legend\n g._extra_kwds.update(self._extra_kwds)\n g._extra_kwds.update(other._extra_kwds)\n g._legend_colors = self._legend_colors + 
other._legend_colors\n g._legend_opts.update(self._legend_opts)\n g._legend_opts.update(other._legend_opts)\n if 'flip_x' in self._extra_kwds and 'flip_x' in other._extra_kwds:\n g._extra_kwds['flip_x'] = (self._extra_kwds['flip_x']\n or other._extra_kwds['flip_x'])\n if 'flip_y' in self._extra_kwds and 'flip_y' in other._extra_kwds:\n g._extra_kwds['flip_y'] = (self._extra_kwds['flip_y']\n or other._extra_kwds['flip_y'])\n if self.aspect_ratio() == 'automatic':\n g.set_aspect_ratio(other.aspect_ratio())\n elif other.aspect_ratio() == 'automatic':\n g.set_aspect_ratio(self.aspect_ratio())\n else:\n g.set_aspect_ratio(max(self.aspect_ratio(), other.aspect_ratio()))\n return g\n\n def add_primitive(self, primitive):\n \"\"\"\n Adds a primitive to this graphics object.\n\n EXAMPLES:\n\n We give a very explicit example::\n\n sage: G = Graphics()\n sage: from sage.plot.line import Line\n sage: from sage.plot.arrow import Arrow\n sage: L = Line([3,4,2,7,-2],[1,2,e,4,5.],{'alpha':1,'thickness':2,'rgbcolor':(0,1,1),'legend_label':''})\n sage: A = Arrow(2,-5,.1,.2,{'width':3,'head':0,'rgbcolor':(1,0,0),'linestyle':'dashed','zorder':8,'legend_label':''})\n sage: G.add_primitive(L)\n sage: G.add_primitive(A)\n sage: G\n Graphics object consisting of 2 graphics primitives\n \"\"\"\n self._objects.append(primitive)\n\n def plot(self):\n \"\"\"\n Draw a 2D plot of this graphics object, which just returns this\n object since this is already a 2D graphics object.\n\n EXAMPLES::\n\n sage: S = circle((0,0), 2)\n sage: S.plot() is S\n True\n\n It does not accept any argument (:trac:`19539`)::\n\n sage: S.plot(1)\n Traceback (most recent call last):\n ...\n TypeError: ...plot() takes 1 positional argument but 2 were given\n\n sage: S.plot(hey=\"hou\")\n Traceback (most recent call last):\n ...\n TypeError: ...plot() got an unexpected keyword argument 'hey'\n \"\"\"\n return self\n\n def plot3d(self, z=0, **kwds):\n \"\"\"\n Returns an embedding of this 2D plot into the xy-plane of 3D 
space,\n as a 3D plot object. An optional parameter z can be given to\n specify the z-coordinate.\n\n EXAMPLES::\n\n sage: sum([plot(z*sin(x), 0, 10).plot3d(z) for z in range(6)]) # long time\n Graphics3d Object\n \"\"\"\n from sage.plot.plot3d.base import Graphics3dGroup\n g = Graphics3dGroup([g.plot3d(**kwds) for g in self._objects])\n if z:\n g = g.translate(0, 0, z)\n return g\n\n @classmethod\n def _extract_kwds_for_show(cls, kwds, ignore=[]):\n \"\"\"\n Extract keywords relevant to show() from the provided dictionary.\n\n EXAMPLES::\n\n sage: kwds = {'f': lambda x: x, 'xmin': 0, 'figsize': [1,1], 'plot_points': (40, 40)}\n sage: G_kwds = Graphics._extract_kwds_for_show(kwds, ignore='xmin')\n sage: kwds # Note how this action modifies the passed dictionary\n {'f': <function <lambda> at 0x...>,\n 'plot_points': (40, 40),\n 'xmin': 0}\n sage: G_kwds\n {'figsize': [1, 1]}\n\n This method is intended to be used with _set_extra_kwds(). Here is an\n idiom to ensure the correct keywords will get passed on to show()::\n\n sage: options = {} # Usually this will come from an argument\n sage: g = Graphics()\n sage: g._set_extra_kwds(Graphics._extract_kwds_for_show(options))\n \"\"\"\n result = {}\n for option in cls.SHOW_OPTIONS:\n if option not in ignore:\n try:\n result[option] = kwds.pop(option)\n except KeyError:\n pass\n return result\n\n def _set_extra_kwds(self, kwds):\n \"\"\"\n Set a dictionary of keywords that will get passed on to show().\n\n TESTS::\n\n sage: g = Graphics()\n sage: g._extra_kwds\n {}\n sage: g._set_extra_kwds({'figsize': [10,10]})\n sage: g._extra_kwds\n {'figsize': [10, 10]}\n sage: g.show() # Now the (blank) plot will be extra large\n \"\"\"\n self._extra_kwds = kwds\n\n def _set_scale(self, subplot, scale=None, base=None):\n \"\"\"\n Set the scale of the axes in the current subplot. This function is\n only for internal use.\n\n INPUT:\n - ``subplot`` -- matplotlib Axes instance.\n - ``scale`` -- the scale of the figure. 
Values it can take are\n ``\"linear\"``, ``\"loglog\"``, ``\"semilogx\"``, ``\"semilogy\"``. See\n :meth:`show` for other options it can take.\n - ``base`` -- the base of the logarithm if a logarithmic scale is\n set. See :meth:`show` for the options it can take.\n\n OUTPUT:\n The scale in the form of a tuple: (xscale, yscale, basex, basey)\n\n EXAMPLES::\n\n sage: p = plot(x, 1, 10)\n sage: fig = p.matplotlib()\n sage: ax = fig.get_axes()[0]\n sage: p._set_scale(ax, scale='linear', base=2)\n ('linear', 'linear', 10, 10)\n sage: p._set_scale(ax, scale='semilogy', base=2)\n ('linear', 'log', 10, 2)\n sage: p._set_scale(ax, scale=('loglog', 2, 3))\n ('log', 'log', 2, 3)\n sage: p._set_scale(ax, scale=['semilogx', 2])\n ('log', 'linear', 2, 10)\n\n TESTS::\n\n sage: p._set_scale(ax, 'log')\n Traceback (most recent call last):\n ...\n ValueError: The scale must be one of 'linear', 'loglog', 'semilogx' or 'semilogy' -- got 'log'\n sage: p._set_scale(ax, ('loglog', 1))\n Traceback (most recent call last):\n ...\n ValueError: The base of the logarithm must be greater than 1\n \"\"\"\n if scale is None:\n return ('linear', 'linear', 10, 10)\n if isinstance(scale, (list, tuple)):\n if len(scale) != 2 and len(scale) != 3:\n raise ValueError(\"If the input is a tuple, it must be of \"\n \"the form (scale, base) or (scale, basex, basey)\")\n if len(scale) == 2:\n base = scale[1]\n else:\n base = scale[1:]\n scale = scale[0]\n\n if scale not in ('linear', 'loglog', 'semilogx', 'semilogy'):\n raise ValueError(\"The scale must be one of 'linear', 'loglog',\"\n \" 'semilogx' or 'semilogy' -- got '{0}'\".format(scale))\n\n if isinstance(base, (list, tuple)):\n basex, basey = base\n elif base is None:\n basex = basey = 10\n else:\n basex = basey = base\n\n if basex <= 1 or basey <= 1:\n raise ValueError(\"The base of the logarithm must be greater \"\n \"than 1\")\n\n xscale = yscale = 'linear'\n if scale == 'linear':\n basex = basey = 10\n elif scale == 'loglog':\n 
subplot.set_xscale('log', base=basex)\n subplot.set_yscale('log', base=basey)\n xscale = yscale = 'log'\n elif scale == 'semilogx':\n subplot.set_xscale('log', base=basex)\n basey = 10\n xscale = 'log'\n elif scale == 'semilogy':\n subplot.set_yscale('log', base=basey)\n basex = 10\n yscale = 'log'\n\n return (xscale, yscale, basex, basey)\n\n # This dictionary has the default values for the keywords to show(). When\n # show is invoked with keyword arguments, those arguments are merged with\n # this dictionary to create a set of keywords with the defaults filled in.\n # Then, those keywords are passed on to save().\n\n # NOTE: If you intend to use a new parameter in show(), you should update\n # this dictionary to contain the default value for that parameter.\n\n SHOW_OPTIONS = dict(# axes options\n axes=None, axes_labels=None, axes_labels_size=None,\n axes_pad=None, base=None, scale=None,\n xmin=None, xmax=None, ymin=None, ymax=None,\n flip_x=False, flip_y=False,\n # Figure options\n aspect_ratio=None, dpi=DEFAULT_DPI, fig_tight=True,\n figsize=None, fontsize=None, frame=False,\n title=None, title_pos=None, transparent=False,\n # Grid options\n gridlines=None, gridlinesstyle=None,\n hgridlinesstyle=None, vgridlinesstyle=None,\n # Legend options\n legend_options={}, show_legend=None,\n # Ticks options\n ticks=None, tick_formatter=None, ticks_integer=False,\n # Text options\n typeset='default')\n\n # Default options for the legends:\n\n LEGEND_OPTIONS = dict(back_color='white', borderpad=0.6,\n borderaxespad=None,\n columnspacing=None,\n fancybox=False, font_family='sans-serif',\n font_size='medium', font_style='normal',\n font_variant='normal', font_weight='medium',\n handlelength=0.05, handletextpad=0.5,\n labelspacing=0.02, loc='best',\n markerscale=0.6, ncol=1, numpoints=2,\n shadow=True, title=None)\n\n @suboptions('legend', **LEGEND_OPTIONS)\n def show(self, **kwds):\n r\"\"\"\n Show this graphics image immediately.\n\n This method attempts to display the 
graphics immediately,\n without waiting for the currently running code (if any) to\n return to the command line. Be careful, calling it from within\n a loop will potentially launch a large number of external\n viewer programs.\n\n OPTIONAL INPUT:\n\n - ``dpi`` - (default: 100) dots per inch\n\n - ``figsize`` - (default: [6.4, 4.8]) [width, height] inches. The\n maximum value of each of the width and the height can be 327\n inches, at the default ``dpi`` of 100 dpi, which is just shy of\n the maximum allowed value of 32768 dots (pixels).\n\n - ``fig_tight`` - (default: True) whether to clip the drawing\n tightly around drawn objects. If True, then the resulting\n image will usually not have dimensions corresponding to\n ``figsize``. If False, the resulting image will have\n dimensions corresponding to ``figsize``.\n\n - ``aspect_ratio`` - the perceived height divided by the\n perceived width. For example, if the aspect ratio is set to ``1``, circles\n will look round and a unit square will appear to have sides\n of equal length, and if the aspect ratio is set ``2``, vertical units will be\n twice as long as horizontal units, so a unit square will be twice as\n high as it is wide. 
If set to ``'automatic'``, the aspect ratio\n is determined by ``figsize`` and the picture fills the figure.\n\n - ``axes`` - (default: True)\n\n - ``axes_labels`` - (default: None) list (or tuple) of two\n strings; the first is used as the label for the horizontal\n axis, and the second for the vertical axis.\n\n - ``axes_labels_size`` - (default: current setting -- 1.6) scale factor\n relating the size of the axes labels with respect to the size of the\n tick marks.\n\n - ``fontsize`` - (default: current setting -- 10) positive\n integer; used for axes labels; if you make this very large,\n you may have to increase figsize to see all labels.\n\n - ``frame`` - (default: False) draw a frame around the image\n\n - ``gridlines`` - (default: None) can be any of the following:\n\n - None, False: do not add grid lines.\n\n - True, \"automatic\", \"major\": add grid lines at major ticks of the axes.\n\n - \"minor\": add grid at major and minor ticks.\n\n - [xlist,ylist]: a tuple or list containing\n two elements, where xlist (or ylist) can be\n any of the following.\n\n\n - None, False: don't add horizontal (or vertical) lines.\n\n - True, \"automatic\", \"major\": add horizontal (or vertical) grid lines at\n the major ticks of the axes.\n\n - \"minor\": add horizontal (or vertical) grid lines at major and minor ticks of\n axes.\n\n - an iterable yielding numbers n or pairs (n,opts), where n\n is the coordinate of the line and opt is a dictionary of\n MATPLOTLIB options for rendering the line.\n\n\n - ``gridlinesstyle, hgridlinesstyle, vgridlinesstyle`` -\n (default: None) a dictionary of MATPLOTLIB options for the\n rendering of the grid lines, the horizontal grid lines or the\n vertical grid lines, respectively.\n\n - ``transparent`` - (default: False) If True, make the background transparent.\n\n - ``axes_pad`` - (default: 0.02 on ``\"linear\"`` scale, 1 on\n ``\"log\"`` scale).\n\n - In the ``\"linear\"`` scale, it determines the percentage of the\n axis range that 
is added to each end of each axis. This helps\n avoid problems like clipping lines because of line-width, etc.\n To get axes that are exactly the specified limits, set\n ``axes_pad`` to zero.\n\n - On the ``\"log\"`` scale, it determines the exponent of the\n fraction of the minimum (resp. maximum) that is subtracted from\n the minimum (resp. added to the maximum) value of the axis. For\n instance if the minimum is `m` and the base of the axis is `b`\n then the new minimum after padding the axis will be\n `m - m/b^{\\mathrm{axes\\_pad}}`.\n\n - ``ticks_integer`` - (default: False) guarantee that the ticks\n are integers (the ``ticks`` option, if specified, will\n override this)\n\n - ``ticks`` - A matplotlib locator for the major ticks, or\n a number. There are several options. For more information about\n locators, type ``from matplotlib import ticker`` and then\n ``ticker?``.\n\n - If this is a locator object, then it is the locator for\n the horizontal axis. A value of None means use the default\n locator.\n\n - If it is a list of two locators, then the first is for the\n horizontal axis and one for the vertical axis. A value of\n None means use the default locator (so a value of\n [None, my_locator] uses my_locator for the vertical axis and\n the default for the horizontal axis).\n\n - If in either case above one of the entries is a number `m`\n (something which can be coerced to a float), it will be\n replaced by a MultipleLocator which places major ticks at\n integer multiples of `m`. See examples.\n\n - If in either case above one of the entries is a list of\n numbers, it will be replaced by a FixedLocator which places\n ticks at the locations specified. This includes the case of\n of the empty list, which will give no ticks. See examples.\n\n - ``tick_formatter`` - A matplotlib formatter for the major\n ticks. There are several options. 
- If one of the entries is a formatter object, then it is used.
            A value of None means to use the default formatter (so using
            ``tick_formatter=[None, my_formatter]`` uses my_formatter
            for the vertical axis and the default for the horizontal axis).
Hence, ``title_pos=(0.5, 0.5)`` will center the title in\n the plot, whereas ``title_pos=(0.5, 1.1)`` will center the\n title along the horizontal direction, but will place the title\n a fraction `0.1` times above the plot.\n\n - If the first entry is a list of strings (or numbers), then the\n formatting for the horizontal axis will be typeset with the strings\n present in the list. Each entry of the list of strings must be\n provided with a corresponding number in the first entry of\n ``ticks`` to indicate its position on the axis. To typeset the\n strings with ``\"latex\"`` enclose them within ``\"$\"`` symbols. To\n have similar custom formatting of the labels along the vertical\n axis, the second entry must be a list of strings and the second\n entry of ``ticks`` must also be a list of numbers which give the\n positions of the labels. See the examples below.\n\n - ``show_legend`` - (default: None) If True, show the legend\n\n - ``legend_*`` - all the options valid for :meth:`set_legend_options`\n prefixed with ``legend_``\n\n - ``base`` - (default: 10) the base of the logarithm if\n a logarithmic scale is set. This must be greater than 1. The base\n can be also given as a list or tuple ``(basex, basey)``.\n ``basex`` sets the base of the logarithm along the horizontal\n axis and ``basey`` sets the base along the vertical axis.\n\n - ``scale`` -- (default: ``\"linear\"``) string. The scale of the axes.\n Possible values are\n\n - ``\"linear\"`` -- linear scaling of both the axes\n - ``\"loglog\"`` -- sets both the horizontal and vertical axes to\n logarithmic scale\n - ``\"semilogx\"`` -- sets only the horizontal axis to logarithmic\n scale.\n - ``\"semilogy\"`` -- sets only the vertical axis to logarithmic\n scale.\n\n The scale can be also be given as single argument that is a list\n or tuple ``(scale, base)`` or ``(scale, basex, basey)``.\n\n .. 
note::\n\n - If the ``scale`` is ``\"linear\"``, then irrespective of what\n ``base`` is set to, it will default to 10 and will remain\n unused.\n\n - ``xmin`` -- starting x value in the rendered figure.\n\n - ``xmax`` -- ending x value in the rendered figure.\n\n - ``ymin`` -- starting y value in the rendered figure.\n\n - ``ymax`` -- ending y value in the rendered figure.\n\n - ``flip_x`` -- (default: False) boolean. If True, flip the horizontal\n axis.\n\n - ``flip_y`` -- (default: False) boolean. If True, flip the vertical\n axis.\n\n - ``typeset`` -- (default: ``\"default\"``) string. The type of\n font rendering that should be used for the text. The possible\n values are\n\n - ``\"default\"`` -- Uses matplotlib's internal text rendering\n engine called Mathtext ( see\n https://matplotlib.org/users/mathtext.html ). If you have\n modified the default matplotlib settings, for instance via\n a matplotlibrc file, then this option will not change any of\n those settings.\n - ``\"latex\"`` -- LaTeX is used for rendering the fonts. This\n requires LaTeX, dvipng and Ghostscript to be installed.\n - ``\"type1\"`` -- Type 1 fonts are used by matplotlib in the text\n in the figure. This requires LaTeX, dvipng and Ghostscript to\n be installed.\n\n OUTPUT:\n\n This method does not return anything. Use :meth:`save` if you\n want to save the figure as an image.\n\n EXAMPLES::\n\n sage: c = circle((1,1), 1, color='red')\n sage: c.show(xmin=-1, xmax=3, ymin=-1, ymax=3)\n\n You can make the picture larger by changing ``figsize`` with width,\n height each having a maximum value of 327 inches at default dpi::\n\n sage: p = ellipse((0,0),4,1)\n sage: p.show(figsize=[327,10],dpi=100)\n sage: p.show(figsize=[328,10],dpi=80)\n\n You can turn off the drawing of the axes::\n\n sage: show(plot(sin,-4,4), axes=False)\n\n You can also label the axes. 
Putting something in dollar\n signs formats it as a mathematical expression::\n\n sage: show(plot(sin,-4,4), axes_labels=('$x$','$y$'))\n\n You can add a title to a plot::\n\n sage: show(plot(sin,-4,4), title=r'A plot of $\\sin(x)$')\n\n You can also provide the position for the title to the plot. In the\n plot below the title is placed on the bottom left of the figure.::\n\n sage: plot(sin, -4, 4, title='Plot sin(x)', title_pos=(0.05,-0.05))\n Graphics object consisting of 1 graphics primitive\n\n If you want all the text to be rendered by using an external LaTeX\n installation then set the ``typeset`` to ``\"latex\"``. This\n requires that LaTeX, dvipng and Ghostscript be installed::\n\n sage: plot(x, typeset='latex') # optional - latex\n Graphics object consisting of 1 graphics primitive\n\n If you want all the text in your plot to use Type 1 fonts, then\n set the ``typeset`` option to ``\"type1\"``. This requires that\n LaTeX, dvipng and Ghostscript be installed::\n\n sage: plot(x, typeset='type1') # optional - latex\n Graphics object consisting of 1 graphics primitive\n\n You can turn on the drawing of a frame around the plots::\n\n sage: show(plot(sin,-4,4), frame=True)\n\n You can make the background transparent::\n\n sage: plot(sin(x), (x, -4, 4), transparent=True)\n Graphics object consisting of 1 graphics primitive\n\n Prior to :trac:`19485`, legends by default had a shadowless gray\n background. This behavior can be recovered by passing in certain\n ``legend_options``::\n\n sage: p = plot(sin(x), legend_label=r'$\\sin(x)$')\n sage: p.show(legend_options={'back_color': (0.9,0.9,0.9),\n ....: 'shadow': False})\n\n We can change the scale of the axes in the graphics before\n displaying::\n\n sage: G = plot(exp, 1, 10)\n sage: G.show(scale='semilogy')\n\n We can change the base of the logarithm too. The following changes\n the vertical axis to be on log scale, and with base 2. 
Note that\n the ``base`` argument will ignore any changes to the axis which is\n in linear scale.::\n\n sage: G.show(scale='semilogy', base=2) # long time # y axis as powers of 2\n\n ::\n\n sage: G.show(scale='semilogy', base=(3,2)) # base ignored for x-axis\n\n The scale can be also given as a 2-tuple or a 3-tuple.::\n\n sage: G.show(scale=('loglog', 2.1)) # long time # both x and y axes in base 2.1\n\n ::\n\n sage: G.show(scale=('loglog', 2, 3)) # long time # x in base 2, y in base 3\n\n The base need not be an integer, though it does have to be made\n a float.::\n\n sage: G.show(scale='semilogx', base=float(e)) # base is e\n\n Logarithmic scale can be used for various kinds of plots. Here are\n some examples.::\n\n sage: G = list_plot([10**i for i in range(10)]) # long time\n sage: G.show(scale='semilogy') # long time\n\n ::\n\n sage: G = parametric_plot((x, x**2), (x, 1, 10))\n sage: G.show(scale='loglog')\n\n ::\n\n sage: disk((5,5), 4, (0, 3*pi/2)).show(scale='loglog',base=2)\n\n ::\n\n sage: x, y = var('x, y')\n sage: G = plot_vector_field((2^x,y^2),(x,1,10),(y,1,100))\n sage: G.show(scale='semilogx',base=2)\n\n Flip the horizontal or vertical axis.\n\n ::\n\n sage: G = plot(x^3, -2, 3)\n sage: G.show(flip_x=True)\n sage: G.show(flip_y=True)\n\n Add grid lines at the major ticks of the axes.\n\n ::\n\n sage: c = circle((0,0), 1)\n sage: c.show(gridlines=True)\n sage: c.show(gridlines=\"automatic\")\n sage: c.show(gridlines=\"major\")\n\n Add grid lines at the major and minor ticks of the axes.\n\n ::\n\n sage: u,v = var('u v')\n sage: f = exp(-(u^2+v^2))\n sage: p = plot_vector_field(f.gradient(), (u,-2,2), (v,-2,2))\n sage: p.show(gridlines=\"minor\")\n\n Add only horizontal or vertical grid lines.\n\n ::\n\n sage: p = plot(sin,-10,20)\n sage: p.show(gridlines=[None, \"automatic\"])\n sage: p.show(gridlines=[\"minor\", False])\n\n Add grid lines at specific positions (using lists/tuples).\n\n ::\n\n sage: x, y = var('x, y')\n sage: p = 
implicit_plot((y^2-x^2)*(x-1)*(2*x-3)-4*(x^2+y^2-2*x)^2, \\\n ....: (x,-2,2), (y,-2,2), plot_points=1000)\n sage: p.show(gridlines=[[1,0],[-1,0,1]])\n\n Add grid lines at specific positions (using iterators).\n\n ::\n\n sage: def maple_leaf(t):\n ....: return (100/(100+(t-pi/2)^8))*(2-sin(7*t)-cos(30*t)/2)\n sage: p = polar_plot(maple_leaf, -pi/4, 3*pi/2, color=\"red\",plot_points=1000) # long time\n sage: p.show(gridlines=([-3,-2.75,..,3], range(-1,5,2))) # long time\n\n Add grid lines at specific positions (using functions).\n\n ::\n\n sage: y = x^5 + 4*x^4 - 10*x^3 - 40*x^2 + 9*x + 36\n sage: p = plot(y, -4.1, 1.1)\n sage: xlines = lambda a,b: [z for z,m in y.roots()]\n sage: p.show(gridlines=[xlines, [0]], frame=True, axes=False)\n\n Change the style of all the grid lines.\n\n ::\n\n sage: b = bar_chart([-3,5,-6,11], color='red')\n sage: b.show(gridlines=([-1,-0.5,..,4],True),\n ....: gridlinesstyle=dict(color=\"blue\", linestyle=\":\"))\n\n Change the style of the horizontal or vertical grid lines\n separately.\n\n ::\n\n sage: p = polar_plot(2 + 2*cos(x), 0, 2*pi, color=hue(0.3))\n sage: p.show(gridlines=True,\n ....: hgridlinesstyle=dict(color=\"orange\", linewidth=1.0),\n ....: vgridlinesstyle=dict(color=\"blue\", linestyle=\":\"))\n\n Change the style of each grid line individually.\n\n ::\n\n sage: x, y = var('x, y')\n sage: p = implicit_plot((y^2-x^2)*(x-1)*(2*x-3)-4*(x^2+y^2-2*x)^2,\n ....: (x,-2,2), (y,-2,2), plot_points=1000)\n sage: p.show(gridlines=(\n ....: [\n ....: (1,{\"color\":\"red\",\"linestyle\":\":\"}),\n ....: (0,{\"color\":\"blue\",\"linestyle\":\"--\"})\n ....: ],\n ....: [\n ....: (-1,{\"color\":\"red\",\"linestyle\":\":\"}),\n ....: (0,{\"color\":\"blue\",\"linestyle\":\"--\"}),\n ....: (1,{\"color\":\"red\",\"linestyle\":\":\"}),\n ....: ]\n ....: ),\n ....: gridlinesstyle=dict(marker='x',color=\"black\"))\n\n Grid lines can be added to contour plots.\n\n ::\n\n sage: f = sin(x^2 + y^2)*cos(x)*sin(y)\n sage: c = contour_plot(f, (x, 
-4, 4), (y, -4, 4), plot_points=100)\n sage: c.show(gridlines=True, gridlinesstyle={'linestyle':':','linewidth':1, 'color':'red'})\n\n Grid lines can be added to matrix plots.\n\n ::\n\n sage: M = MatrixSpace(QQ,10).random_element()\n sage: matrix_plot(M).show(gridlines=True)\n\n By default, Sage increases the horizontal and vertical axes\n limits by a certain percentage in all directions. This is\n controlled by the ``axes_pad`` parameter. Increasing the range\n of the axes helps avoid problems with lines and dots being\n clipped because the linewidth extends beyond the axes. To get\n axes limits that are exactly what is specified, set\n ``axes_pad`` to zero. Compare the following two examples\n\n ::\n\n sage: plot(sin(x), (x, -pi, pi),thickness=2)+point((pi, -1), pointsize=15)\n Graphics object consisting of 2 graphics primitives\n sage: plot(sin(x), (x, -pi, pi),thickness=2,axes_pad=0)+point((pi, -1), pointsize=15)\n Graphics object consisting of 2 graphics primitives\n\n The behavior of the ``axes_pad`` parameter is different if the axis\n is in the ``\"log\"`` scale. If `b` is the base of the axis, the\n minimum value of the axis, is decreased by the factor\n `1/b^{\\mathrm{axes\\_pad}}` of the minimum and the maximum value of the axis\n is increased by the same factor of the maximum value. Compare the\n axes in the following two plots to see the difference.\n\n ::\n\n sage: plot_loglog(x, (1.1*10**-2, 9990))\n Graphics object consisting of 1 graphics primitive\n\n sage: plot_loglog(x, (1.1*10**-2, 9990), axes_pad=0)\n Graphics object consisting of 1 graphics primitive\n\n Via matplotlib, Sage allows setting of custom ticks. 
See above\n for more details.\n\n Here the labels are not so useful::\n\n sage: plot(sin(pi*x), (x, -8, 8))\n Graphics object consisting of 1 graphics primitive\n\n Now put ticks at multiples of 2::\n\n sage: plot(sin(pi*x), (x, -8, 8), ticks=2)\n Graphics object consisting of 1 graphics primitive\n\n Or just choose where you want the ticks::\n\n sage: plot(sin(pi*x), (x, -8, 8), ticks=[[-7,-3,0,3,7],[-1/2,0,1/2]])\n Graphics object consisting of 1 graphics primitive\n\n Or no ticks at all::\n\n sage: plot(sin(pi*x), (x, -8, 8), ticks=[[],[]])\n Graphics object consisting of 1 graphics primitive\n\n This can be very helpful in showing certain features of plots. ::\n\n sage: plot(1.5/(1+e^(-x)), (x, -10, 10)) # doesn't quite show value of inflection point\n Graphics object consisting of 1 graphics primitive\n\n ::\n\n sage: plot(1.5/(1+e^(-x)), (x, -10, 10), ticks=[None, 1.5/4]) # It's right at f(x)=0.75!\n Graphics object consisting of 1 graphics primitive\n\n But be careful to leave enough room for at least two major ticks, so that\n the user can tell what the scale is::\n\n sage: plot(x^2,(x,1,8),ticks=6).show()\n Traceback (most recent call last):\n ...\n ValueError: Expand the range of the independent variable to\n allow two multiples of your tick locator (option `ticks`).\n\n We can also do custom formatting if you need it. See above for full\n details::\n\n sage: plot(2*x+1,(x,0,5),ticks=[[0,1,e,pi,sqrt(20)],2],tick_formatter=\"latex\")\n Graphics object consisting of 1 graphics primitive\n\n This is particularly useful when setting custom ticks in multiples\n of `\\pi`.\n\n ::\n\n sage: plot(sin(x),(x,0,2*pi),ticks=pi/3,tick_formatter=pi)\n Graphics object consisting of 1 graphics primitive\n\n But keep in mind that you will get exactly the formatting you asked\n for if you specify both formatters. The first syntax is recommended\n for best style in that case. 
If the number of tick labels does not match the number of positions of
        the tick labels, then it results in an error.::
However, if you\n specify ``ticks`` manually, this safety measure can be defeated::\n\n sage: list_plot_loglog([(1,2),(2,3)], plotjoined=True, ticks=[[1],[1]])\n doctest:...: UserWarning: The x-axis contains fewer than 2 ticks;\n the logarithmic scale of the plot may not be apparent to the reader.\n doctest:...: UserWarning: The y-axis contains fewer than 2 ticks;\n the logarithmic scale of the plot may not be apparent to the reader.\n Graphics object consisting of 1 graphics primitive\n\n This one works, since the horizontal axis is automatically expanded\n to contain two ticks and the vertical axis is provided with two ticks::\n\n sage: list_plot_loglog([(1,2),(2,3)], plotjoined=True, ticks=[None,[1,10]])\n Graphics object consisting of 1 graphics primitive\n\n Another example in the log scale where both the axes are automatically\n expanded to show two major ticks::\n\n sage: list_plot_loglog([(2,0.5), (3, 4)], plotjoined=True)\n Graphics object consisting of 1 graphics primitive\n\n When using ``title_pos``, it must be ensured that a list or a tuple\n of length two is used. 
Otherwise, a warning is raised::\n\n sage: plot(x, -4, 4, title='Plot x', title_pos=0.05)\n doctest:...: ...RichReprWarning: Exception in _rich_repr_ while displaying object: 'title_pos' must be a list or tuple of two real numbers.\n Graphics object consisting of 1 graphics primitive\n\n TESTS:\n\n The following tests result in a segmentation fault and should not\n be run or doctested::\n\n sage: p = ellipse((0,0),4,1)\n sage: p.show(figsize=[232,232],dpi=100) # not tested\n ------------------------------------------------------------------------\n Unhandled SIGSEGV: A segmentation fault occurred.\n This probably occurred because a *compiled* module has a bug\n in it and is not properly wrapped with sig_on(), sig_off().\n Python will now terminate.\n ------------------------------------------------------------------------\n sage: p.show(figsize=[327,181],dpi=100) # not tested\n ------------------------------------------------------------------------\n Unhandled SIGSEGV: A segmentation fault occurred.\n This probably occurred because a *compiled* module has a bug\n in it and is not properly wrapped with sig_on(), sig_off().\n Python will now terminate.\n ------------------------------------------------------------------------\n\n The following tests ensure we give a good error message for\n negative figsizes::\n\n sage: P = plot(x^2,(x,0,1))\n sage: P.show(figsize=[-1,1])\n Traceback (most recent call last):\n ...\n ValueError: figsize should be positive numbers, not -1.0 and 1.0\n sage: P.show(figsize=-1)\n Traceback (most recent call last):\n ...\n ValueError: figsize should be positive, not -1.0\n sage: P.show(figsize=x^2)\n Traceback (most recent call last):\n ...\n TypeError: figsize should be a positive number, not x^2\n sage: P.show(figsize=[2,3,4])\n Traceback (most recent call last):\n ...\n ValueError: figsize should be a positive number or a list of two positive numbers, not [2, 3, 4]\n sage: P.show(figsize=[sqrt(2),sqrt(3)])\n \"\"\"\n from 
sage.repl.rich_output import get_display_manager\n dm = get_display_manager()\n dm.display_immediately(self, **kwds)\n\n def xmin(self, xmin=None):\n \"\"\"\n EXAMPLES::\n\n sage: g = line([(-1,1), (3,2)])\n sage: g.xmin()\n -1.0\n sage: g.xmin(-3)\n sage: g.xmin()\n -3.0\n \"\"\"\n if xmin is None:\n return self.get_axes_range()['xmin']\n else:\n self.set_axes_range(xmin=xmin)\n\n def xmax(self, xmax=None):\n \"\"\"\n EXAMPLES::\n\n sage: g = line([(-1,1), (3,2)])\n sage: g.xmax()\n 3.0\n sage: g.xmax(10)\n sage: g.xmax()\n 10.0\n \"\"\"\n if xmax is None:\n return self.get_axes_range()['xmax']\n else:\n self.set_axes_range(xmax=xmax)\n\n def ymin(self, ymin=None):\n \"\"\"\n EXAMPLES::\n\n sage: g = line([(-1,1), (3,2)])\n sage: g.ymin()\n 1.0\n sage: g.ymin(-3)\n sage: g.ymin()\n -3.0\n \"\"\"\n if ymin is None:\n return self.get_axes_range()['ymin']\n else:\n self.set_axes_range(ymin=ymin)\n\n def ymax(self, ymax=None):\n \"\"\"\n EXAMPLES::\n\n sage: g = line([(-1,1), (3,2)])\n sage: g.ymax()\n 2.0\n sage: g.ymax(10)\n sage: g.ymax()\n 10.0\n \"\"\"\n if ymax is None:\n return self.get_axes_range()['ymax']\n else:\n self.set_axes_range(ymax=ymax)\n\n def get_minmax_data(self):\n r\"\"\"\n Return the x and y coordinate minimum and maximum\n\n .. warning::\n\n The returned dictionary is mutable, but changing it does\n not change the xmin/xmax/ymin/ymax data. The minmax data is a function\n of the primitives which make up this Graphics object. 
To change the\n range of the axes, call methods :meth:`xmin`, :meth:`xmax`,\n :meth:`ymin`, :meth:`ymax`, or :meth:`set_axes_range`.\n\n OUTPUT:\n\n A dictionary whose keys give the xmin, xmax, ymin, and ymax\n data for this graphic.\n\n EXAMPLES::\n\n sage: g = line([(-1,1), (3,2)])\n sage: list(sorted(g.get_minmax_data().items()))\n [('xmax', 3.0), ('xmin', -1.0), ('ymax', 2.0), ('ymin', 1.0)]\n\n Note that changing ymax doesn't change the output of get_minmax_data::\n\n sage: g.ymax(10)\n sage: list(sorted(g.get_minmax_data().items()))\n [('xmax', 3.0), ('xmin', -1.0), ('ymax', 2.0), ('ymin', 1.0)]\n\n The width/height ratio (in output units, after factoring in the\n chosen aspect ratio) of the plot is limited to `10^{-15}\\dots\n 10^{15}`, otherwise floating point errors cause problems in\n matplotlib::\n\n sage: l = line([(1e-19,-1), (-1e-19,+1)], aspect_ratio=1.0)\n sage: l.get_minmax_data()\n {'xmax': 1.00010000000000e-15,\n 'xmin': -9.99900000000000e-16,\n 'ymax': 1.0,\n 'ymin': -1.0}\n sage: l = line([(0,0), (1,1)], aspect_ratio=1e19)\n sage: l.get_minmax_data()\n {'xmax': 5000.50000000000, 'xmin': -4999.50000000000, 'ymax': 1.0, 'ymin': 0.0}\n \"\"\"\n objects = self._objects\n if objects:\n minmax_data = [o.get_minmax_data() for o in objects]\n xmin = min(d['xmin'] for d in minmax_data)\n xmax = max(d['xmax'] for d in minmax_data)\n ymin = min(d['ymin'] for d in minmax_data)\n ymax = max(d['ymax'] for d in minmax_data)\n if isnan(xmin):\n xmin = 0\n sage.misc.verbose.verbose(\"xmin was NaN (setting to 0)\", level=0)\n if isnan(xmax):\n xmax = 0\n sage.misc.verbose.verbose(\"xmax was NaN (setting to 0)\", level=0)\n if isnan(ymin):\n ymin = 0\n sage.misc.verbose.verbose(\"ymin was NaN (setting to 0)\", level=0)\n if isnan(ymax):\n ymax = 0\n sage.misc.verbose.verbose(\"ymax was NaN (setting to 0)\", level=0)\n else:\n xmin = xmax = ymin = ymax = 0\n\n if xmin == xmax:\n xmin -= 1\n xmax += 1\n if ymin == ymax:\n ymin -= 1\n ymax += 1\n return 
self._limit_output_aspect_ratio(xmin, xmax, ymin, ymax)\n\n def _limit_output_aspect_ratio(self, xmin, xmax, ymin, ymax):\n r\"\"\"\n Private helper function for :meth:`get_minmax_data`\n\n INPUT:\n\n - ``xmin``, ``xmax``, ``ymin``, ``ymax`` -- bounding box for\n the graphics.\n\n OUTPUT:\n\n A dictionary whose keys give the xmin, xmax, ymin, and ymax\n data for this graphic. Possibly enlarged in order to keep the\n width/height ratio (in output units, after factoring in the\n chosen aspect ratio) of the plot is limited to `10^{-15}\\dots\n 10^{15}` to avoid floating point issues in matplotlib.\n\n EXAMPLES::\n\n sage: l = line([(0,0), (1,1)], aspect_ratio=1.0)\n sage: l._limit_output_aspect_ratio(1, 2, 1e19, 3)\n {'xmax': -4999.50000000000,\n 'xmin': 5000.50000000000,\n 'ymax': 3,\n 'ymin': 1.00000000000000e19}\n sage: l._limit_output_aspect_ratio(1, 2, 3, 1e19)\n {'xmax': 5000.50000000000,\n 'xmin': -4999.50000000000,\n 'ymax': 1.00000000000000e19,\n 'ymin': 3}\n sage: l = line([(0,0), (1,1)], aspect_ratio=1e16)\n sage: l._limit_output_aspect_ratio(0, 1, 2, 3)\n {'xmax': 5.50000000000000, 'xmin': -4.50000000000000, 'ymax': 3, 'ymin': 2}\n \"\"\"\n aspect_ratio = self.aspect_ratio()\n if aspect_ratio != 'automatic':\n width = xmax - xmin\n height = ymax - ymin\n output_aspect = abs(width / height / aspect_ratio)\n if output_aspect > 1e15:\n height = 1e15 * width / aspect_ratio\n ycenter = (ymax - ymin) / 2\n ymin = ycenter - height / 2\n ymax = ycenter + height / 2\n if output_aspect < 1e-15:\n width = 1e-15 * height * aspect_ratio\n xcenter = (xmax - xmin) / 2\n xmin = xcenter - width / 2\n xmax = xcenter + width / 2\n return {'xmin': xmin, 'xmax': xmax, 'ymin': ymin, 'ymax': ymax}\n\n def _matplotlib_tick_formatter(self, subplot, base=(10, 10),\n locator_options={}, scale=('linear', 'linear'),\n tick_formatter=(None, None), ticks=(None, None),\n xmax=None, xmin=None, ymax=None, ymin=None):\n r\"\"\"\n Take a matplotlib subplot instance representing the graphic 
and set\n the ticks formatting. This function is only for internal use.\n\n INPUT:\n - ``subplot`` -- the subplot instance.\n\n EXAMPLES::\n\n sage: from matplotlib.figure import Figure\n sage: p = plot(x); d = p.get_minmax_data()\n sage: subplot = Figure().add_subplot(111)\n sage: p._objects[0]._render_on_subplot(subplot)\n sage: p._matplotlib_tick_formatter(subplot, **d)\n (<AxesSubplot:>,\n <matplotlib.ticker.MaxNLocator object at ...>,\n <matplotlib.ticker.MaxNLocator object at ...>,\n <matplotlib.ticker.ScalarFormatter object at ...>,\n <matplotlib.ticker.ScalarFormatter object at ...>)\n \"\"\"\n # This function is created to refactor some code that is repeated\n # in the matplotlib function\n from matplotlib.ticker import (FixedLocator, Locator,\n LogFormatterMathtext, LogLocator, MaxNLocator,\n MultipleLocator, NullLocator, ScalarFormatter)\n\n x_locator, y_locator = ticks\n # ---------------------- Location of x-ticks ---------------------\n\n if x_locator is None:\n if scale[0] == 'log':\n x_locator = LogLocator(base=base[0])\n else:\n x_locator = MaxNLocator(**locator_options)\n elif isinstance(x_locator, Locator):\n pass\n elif x_locator == []:\n x_locator = NullLocator()\n elif isinstance(x_locator, list):\n x_locator = FixedLocator(x_locator)\n else: # x_locator is a number which can be made a float\n from sage.functions.other import ceil, floor\n if floor(xmax / x_locator) - ceil(xmin / x_locator) > 1:\n x_locator = MultipleLocator(float(x_locator))\n else: # not enough room for two major ticks\n raise ValueError('Expand the range of the independent '\n 'variable to allow two multiples of your tick locator '\n '(option `ticks`).')\n\n # ---------------------- Location of y-ticks ---------------------\n if y_locator is None:\n if scale[1] == 'log':\n y_locator = LogLocator(base=base[1])\n else:\n y_locator = MaxNLocator(**locator_options)\n elif isinstance(y_locator, Locator):\n pass\n elif y_locator == []:\n y_locator = NullLocator()\n elif 
isinstance(y_locator, list):\n y_locator = FixedLocator(y_locator)\n else: # y_locator is a number which can be made a float\n from sage.functions.other import ceil, floor\n if floor(ymax / y_locator) - ceil(ymin / y_locator) > 1:\n y_locator = MultipleLocator(float(y_locator))\n else: # not enough room for two major ticks\n raise ValueError('Expand the range of the dependent '\n 'variable to allow two multiples of your tick locator '\n '(option `ticks`).')\n\n x_formatter, y_formatter = tick_formatter\n from matplotlib.ticker import FuncFormatter, FixedFormatter\n from sage.misc.latex import latex\n from sage.symbolic.ring import SR\n from .misc import _multiple_of_constant\n # ---------------------- Formatting x-ticks ----------------------\n if x_formatter is None:\n if scale[0] == 'log':\n x_formatter = LogFormatterMathtext(base=base[0])\n else:\n x_formatter = ScalarFormatter()\n elif x_formatter in SR:\n x_const = x_formatter\n x_formatter = FuncFormatter(lambda n, pos:\n _multiple_of_constant(n, pos, x_const))\n elif x_formatter == \"latex\":\n if scale[0] == 'log':\n # We need to strip out '\\\\mathdefault' from the string\n x_formatter = FuncFormatter(lambda n, pos:\n LogFormatterMathtext(base=base[0])(n, pos).replace(\n \"\\\\mathdefault\", \"\"))\n else:\n x_formatter = FuncFormatter(lambda n, pos: '$%s$' % latex(n))\n elif isinstance(x_formatter, (list, tuple)):\n if (not isinstance(ticks[0], (list, tuple)) or\n len(ticks[0]) != len(x_formatter)):\n raise ValueError(\"If the first component of the list \"\n \"`tick_formatter` is a list then the first component \"\n \"of `ticks` must also be a list of equal length.\")\n x_formatter = FixedFormatter(x_formatter)\n # ---------------------- Formatting y-ticks ----------------------\n if y_formatter is None:\n if scale[1] == 'log':\n y_formatter = LogFormatterMathtext(base=base[1])\n else:\n y_formatter = ScalarFormatter()\n elif y_formatter in SR:\n y_const = y_formatter\n y_formatter = FuncFormatter(lambda 
n, pos:\n _multiple_of_constant(n, pos, y_const))\n elif y_formatter == \"latex\":\n if scale[1] == 'log':\n # We need to strip out '\\\\mathdefault' from the string\n y_formatter = FuncFormatter(lambda n, pos:\n LogFormatterMathtext(base=base[1])(n, pos).replace(\n \"\\\\mathdefault\", \"\"))\n else:\n y_formatter = FuncFormatter(lambda n, pos: '$%s$' % latex(n))\n elif isinstance(y_formatter, (list, tuple)):\n if (not isinstance(ticks[1], (list, tuple)) or\n len(ticks[1]) != len(y_formatter)):\n raise ValueError(\"If the second component of the list \"\n \"`tick_formatter` is a list then the second component \"\n \"of `ticks` must also be a list of equal length.\")\n y_formatter = FixedFormatter(y_formatter)\n\n subplot.xaxis.set_major_locator(x_locator)\n subplot.yaxis.set_major_locator(y_locator)\n subplot.xaxis.set_major_formatter(x_formatter)\n subplot.yaxis.set_major_formatter(y_formatter)\n\n # Check for whether there will be too few ticks in the log scale case.\n # If there are not enough ticks (2 or more) to determine that the scale\n # is non-linear, we throw a warning.\n from warnings import warn\n tickwarnmsg = 'The %s-axis contains fewer than 2 ticks; '\n tickwarnmsg += 'the logarithmic scale of the plot may not be apparent '\n tickwarnmsg += 'to the reader.'\n\n if (scale[0] == 'log' and not isinstance(x_locator, NullLocator) and\n len(subplot.xaxis.get_ticklocs()) < 2):\n warn(tickwarnmsg % 'x')\n\n if (scale[1] == 'log' and not isinstance(y_locator, NullLocator) and\n len(subplot.yaxis.get_ticklocs()) < 2):\n warn(tickwarnmsg % 'y')\n\n return (subplot, x_locator, y_locator, x_formatter, y_formatter)\n\n def _get_vmin_vmax(self, vmin, vmax, basev, axes_pad):\n r\"\"\"\n Determine the min/max value for a variable plotted on a logarithmic\n scale. The motivation is that we desire at least two ticks for a log\n plot; otherwise the reader may assume that the scale is linear. For\n internal use only.\n\n We check if this case occurs (for e.g. 
assuming xmin < xmax):\n\n floor(logxmin) ceil(logxmax)\n ----|---------+----------+----------|----------------------|--\n logxmin logxmax\n\n Or if this case occurs (assuming xmin < xmax):\n\n floor(logxmin) floor(logxmax) ceil(logxmax)\n ----|---------+---------------------|-----+----------------|--\n logxmin logxmax\n\n\n INPUT:\n\n - ``vmin`` - the current min for this variable (e.g. xmin or ymin)\n\n - ``vmax`` - the current max for this variable (e.g. xmax or ymax)\n\n - ``basev`` - the base of the logarithmic scale for this variable\n\n - ``axes_pad`` - the padding for the axis. It determines the\n exponent of the fraction of the minimum (resp. maximum) that is\n subtracted from the minimum (resp. added to the maximum) value of\n the axis. For instance if the minimum is `m` and the base of the\n axis is `b` then the new minimum after padding the axis will be\n `m - m/b^{\\mathrm{axes\\_pad}}`.\n\n OUTPUT:\n\n A new (min,max) pair for this variable, suitable for its logarithmic\n scale.\n\n EXAMPLES:\n\n On a base-10 logarithmic scale, we should have ``vmin``/``vmax``\n at least 10 units apart::\n\n sage: p = Graphics()\n sage: p._get_vmin_vmax(1, 2, 10, None) == (9/10, 10)\n True\n sage: p._get_vmin_vmax(1, 5, 10, None) == (9/10, 10)\n True\n sage: p._get_vmin_vmax(1, 10, 10, None)\n (9/10, 11)\n sage: p._get_vmin_vmax(1, 11, 10, None)\n (9/10, 121/10)\n sage: p._get_vmin_vmax(1, 50, 10, None)\n (9/10, 55)\n\n We can set the ``axes_pad`` separately::\n\n sage: p._get_vmin_vmax(1, 50, 2, 2)\n (0.75, 62.5)\n\n Nonpositive values of ``vmin`` are not accepted due to the domain\n of the logarithm function::\n\n sage: p = Graphics()\n sage: p._get_vmin_vmax(-1,2,10, None)\n Traceback (most recent call last):\n ...\n ValueError: vmin must be positive\n\n And ``vmax`` must be greater than ``vmin``::\n\n sage: p._get_vmin_vmax(1,-2,10, None)\n Traceback (most recent call last):\n ...\n ValueError: vmin must be less than vmax\n\n \"\"\"\n if vmin <= 0:\n raise 
ValueError('vmin must be positive')\n\n if vmin >= vmax:\n raise ValueError('vmin must be less than vmax')\n\n import math\n if axes_pad is None:\n axes_pad = 1\n else:\n axes_pad = float(abs(axes_pad))\n\n logvmin = math.log(vmin) / math.log(basev)\n logvmax = math.log(vmax) / math.log(basev)\n\n if math.floor(logvmax) - math.ceil(logvmin) < 0:\n vmax = basev**math.ceil(logvmax)\n vmin = basev**math.floor(logvmin)\n elif math.floor(logvmax) - math.ceil(logvmin) < 1:\n if logvmax - math.floor(logvmax) > math.ceil(logvmin) - logvmin:\n vmax = basev**math.ceil(logvmax)\n if axes_pad > 0:\n vmin -= vmin * basev**(-axes_pad)\n else:\n vmin = basev**math.floor(logvmin)\n if axes_pad > 0:\n vmax += vmax * basev**(-axes_pad)\n elif axes_pad > 0:\n # pad the axes if we haven't expanded the axes earlier.\n vmin -= vmin * basev**(-axes_pad)\n vmax += vmax * basev**(-axes_pad)\n\n return vmin, vmax\n\n def matplotlib(self, filename=None,\n xmin=None, xmax=None, ymin=None, ymax=None,\n figsize=None, figure=None, sub=None,\n axes=None, axes_labels=None, axes_labels_size=None,\n flip_x=False, flip_y=False,\n fontsize=None, frame=False, verify=True,\n aspect_ratio=None,\n gridlines=None, gridlinesstyle=None,\n vgridlinesstyle=None, hgridlinesstyle=None,\n show_legend=None, legend_options=None,\n axes_pad=None, ticks_integer=None,\n tick_formatter=None, ticks=None, title=None,\n title_pos=None, base=None, scale=None,\n stylesheet=None,\n typeset='default'):\n r\"\"\"\n Construct or modify a Matplotlib figure by drawing ``self`` on it.\n\n INPUT (partial description, involving only Matplotlib objects; see\n :meth:`show` for the other arguments):\n\n - ``figure`` -- (default: ``None``) Matplotlib figure (class\n ``matplotlib.figure.Figure``) on which ``self`` is to be displayed;\n if ``None``, the figure will be created from the parameter\n ``figsize``\n\n - ``figsize`` -- (default: ``None``) width or [width, height] in inches\n of the Matplotlib figure in case ``figure`` is 
``None``; if\n ``figsize`` is ``None``, Matplotlib's default (6.4 x 4.8 inches) is\n used\n\n - ``sub`` -- (default: ``None``) subpart of the figure, as an\n instance of Matplotlib \"axes\" (class ``matplotlib.axes.Axes``) on\n which ``self`` is to be drawn; if ``None``, the subpart will be\n created so as to cover the whole figure\n\n OUTPUT:\n\n - a ``matplotlib.figure.Figure`` object; if the argument ``figure`` is\n provided, this is the same object as ``figure``.\n\n EXAMPLES::\n\n sage: c = circle((1,1),1)\n sage: print(c.matplotlib())\n Figure(640x480)\n\n To obtain the first Matplotlib ``Axes`` object inside of the\n figure, you can do something like the following.\n\n ::\n\n sage: p = plot(sin(x), (x, -2*pi, 2*pi))\n sage: figure = p.matplotlib()\n sage: axes = figure.axes[0]\n\n TESTS:\n\n We verify that :trac:`10291` is fixed::\n\n sage: p = plot(sin(x), (x, -2*pi, 2*pi))\n sage: figure = p.matplotlib()\n sage: axes_range = p.get_axes_range()\n sage: figure = p.matplotlib()\n sage: axes_range2 = p.get_axes_range()\n sage: axes_range == axes_range2\n True\n\n We verify that legend options are properly handled (:trac:`12960`).\n First, we test with no options, and next with an incomplete set of\n options.::\n\n sage: p = plot(x, legend_label='aha')\n sage: p.legend(True)\n sage: pm = p.matplotlib()\n sage: pm = p.matplotlib(legend_options={'font_size':'small'})\n\n The title should not overlap with the axes labels nor the frame in\n the following plot (see :trac:`10512`)::\n\n sage: plot(sin(x^2), (x, -3, 3), title='Plot of sin(x^2)', axes_labels=['x','y'],frame=True)\n Graphics object consisting of 1 graphics primitive\n\n ``typeset`` must not be set to an arbitrary string::\n\n sage: plot(x, typeset='garbage')\n doctest:...: ...RichReprWarning: Exception in _rich_repr_ while\n displaying object: typeset must be set to one of 'default',\n 'latex', or 'type1'; got 'garbage'.\n Graphics object consisting of 1 graphics primitive\n\n We verify that numerical 
options are changed to float before saving (:trac:`14741`).\n By default, Sage 5.10 changes float objects to the `RealLiteral` type.\n The patch changes them to float before creating `matplotlib` objects.::\n\n sage: f = lambda x, y : (abs(cos((x + I * y) ** 4)) - 1) # long time\n sage: g = implicit_plot(f,(-4, 4),(-3, 3),linewidth=0.6) # long time\n sage: gm = g.matplotlib() # long time # without the patch, this goes BOOM -- er, TypeError\n\n If the axes are flipped, the limits of the axes get swapped::\n\n sage: p = plot(2*x, 1, 2)\n sage: sub, = p.matplotlib(flip_y=True, flip_x=True).axes\n sage: xmin, xmax = sub.get_xlim()\n sage: ymin, ymax = sub.get_ylim()\n sage: xmin > xmax, ymin > ymax\n (True, True)\n \"\"\"\n if not isinstance(ticks, (list, tuple)):\n ticks = (ticks, None)\n if legend_options is None:\n legend_options = {}\n # as discussed in trac #25799 and #23696, Sage prefers the computer\n # modern fonts of TeX for math texts such as axes labels, but otherwise\n # adopts the default style of matplotlib\n from matplotlib import rcParams\n rcParams['mathtext.fontset'] = 'cm'\n rcParams['mathtext.rm'] = 'serif'\n\n import matplotlib.pyplot as plt\n if stylesheet in plt.style.available:\n plt.style.use(stylesheet)\n\n from sage.symbolic.ring import SR\n # make sure both formatters typeset or both don't\n if not isinstance(tick_formatter, (list, tuple)):\n if tick_formatter == \"latex\" or tick_formatter in SR:\n tick_formatter = (tick_formatter, \"latex\")\n else:\n tick_formatter = (tick_formatter, None)\n\n global do_verify\n do_verify = verify\n\n if axes is None:\n axes = self._show_axes\n\n from matplotlib.figure import Figure\n if typeset == 'type1': # Requires LaTeX, dvipng, gs to be installed.\n rcParams['ps.useafm'] = True\n rcParams['pdf.use14corefonts'] = True\n rcParams['text.usetex'] = True\n elif typeset == 'latex': # Requires LaTeX, dvipng, gs to be installed.\n rcParams['ps.useafm'] = False\n rcParams['pdf.use14corefonts'] = False\n 
rcParams['text.usetex'] = True\n elif typeset != 'default': # We won't change (maybe user-set) defaults\n raise ValueError(\"typeset must be set to one of 'default', 'latex',\"\n \" or 'type1'; got '{}'.\".format(typeset))\n\n self.fontsize(fontsize)\n self.axes_labels(l=axes_labels)\n self.axes_labels_size(s=axes_labels_size)\n\n # If no matplotlib figure is provided, it is created here:\n if figure is None:\n if figsize is not None:\n figsize = _parse_figsize(figsize)\n figure = Figure(figsize=figsize)\n\n # The incoming subplot instance\n subplot = sub\n if not subplot:\n subplot = figure.add_subplot(111)\n # Add all the primitives to the subplot\n old_opts = dict()\n for g in self._objects:\n opts, old_opts[g] = g.options(), g.options()\n for k, v in opts.items():\n try:\n if v.parent() in sage.categories.fields.Fields():\n opts[k] = float(v)\n except (AttributeError, TypeError):\n pass\n g.set_options(opts)\n g._render_on_subplot(subplot)\n if hasattr(g, '_bbox_extra_artists'):\n self._bbox_extra_artists.extend(g._bbox_extra_artists)\n # Set the aspect ratio\n if aspect_ratio is None:\n aspect_ratio = self.aspect_ratio()\n if aspect_ratio == 'automatic':\n subplot.set_aspect('auto', adjustable='box')\n else:\n subplot.set_aspect(aspect_ratio, adjustable='box')\n\n # ---------------- Set the axes limits and scale ------------------\n self.set_axes_range(xmin, xmax, ymin, ymax)\n d = self.get_axes_range()\n xmin = d['xmax' if flip_x else 'xmin']\n xmax = d['xmin' if flip_x else 'xmax']\n ymin = d['ymax' if flip_y else 'ymin']\n ymax = d['ymin' if flip_y else 'ymax']\n\n xscale, yscale, basex, basey = self._set_scale(subplot, scale=scale,\n base=base)\n\n # If any of the x-data are negative, we leave the min/max alone.\n if xscale == 'log' and min(xmin, xmax) > 0:\n if xmin < xmax:\n xmin, xmax = self._get_vmin_vmax(xmin, xmax, basex, axes_pad)\n else:\n xmax, xmin = self._get_vmin_vmax(xmax, xmin, basex, axes_pad)\n else:\n xpad = 0.02 if axes_pad is None else 
axes_pad\n xpad = (xmax - xmin) * float(xpad)\n xmax += xpad\n xmin -= xpad\n\n # Likewise for the y-data.\n if yscale == 'log' and min(ymin, ymax) > 0:\n if ymin < ymax:\n ymin, ymax = self._get_vmin_vmax(ymin, ymax, basey, axes_pad)\n else:\n ymax, ymin = self._get_vmin_vmax(ymax, ymin, basey, axes_pad)\n else:\n ypad = 0.02 if axes_pad is None else axes_pad\n ypad = (ymax - ymin) * float(ypad)\n ymax += ypad\n ymin -= ypad\n\n # -------------------------- Set the legend -----------------------\n if show_legend is None:\n show_legend = self._show_legend\n\n if show_legend:\n from matplotlib.font_manager import FontProperties\n lopts = dict()\n lopts.update(legend_options)\n lopts.update(self._legend_opts)\n prop = FontProperties(\n family=lopts.pop('font_family', 'sans-serif'),\n size=lopts.pop('font_size', 'medium'),\n style=lopts.pop('font_style', 'normal'),\n weight=lopts.pop('font_weight', 'medium'),\n variant=lopts.pop('font_variant', 'normal'))\n color = lopts.pop('back_color', 'white')\n leg = subplot.legend(prop=prop, **lopts)\n if leg is None:\n from warnings import warn\n warn(\"legend requested but no items are labeled\")\n else:\n # color\n lframe = leg.get_frame()\n lframe.set_facecolor(color)\n from sage.plot.colors import to_mpl_color\n for txt, color in zip(leg.get_texts(), self._legend_colors):\n if color is not None:\n txt.set_color(to_mpl_color(color))\n\n subplot.set_xlim([xmin, xmax])\n subplot.set_ylim([ymin, ymax])\n\n locator_options = dict(nbins=9, steps=[1, 2, 5, 10],\n integer=ticks_integer)\n\n if axes is None:\n axes = self._show_axes\n\n for spine in subplot.spines.values():\n spine.set_color(self._axes_color)\n spine.set_linewidth(self._axes_width)\n\n if frame:\n # For now, set the formatter to the old one, since that is\n # sort of what we are used to. 
We should eventually look at\n # the default one to see if we like it better.\n\n (subplot, x_locator, y_locator,\n x_formatter, y_formatter) = self._matplotlib_tick_formatter(\n subplot, base=(basex, basey),\n locator_options=locator_options,\n scale=(xscale, yscale),\n tick_formatter=tick_formatter, ticks=ticks,\n xmax=xmax, xmin=xmin, ymax=ymax, ymin=ymin)\n\n subplot.set_frame_on(True)\n if axes and xscale == 'linear' and yscale == 'linear':\n if (ymin <= 0 and ymax >= 0) or (ymax <= 0 and ymin >= 0):\n subplot.axhline(color=self._axes_color,\n linewidth=self._axes_width)\n if (xmin <= 0 and xmax >= 0) or (xmax <= 0 and xmin >= 0):\n subplot.axvline(color=self._axes_color,\n linewidth=self._axes_width)\n\n elif axes:\n ymiddle = False\n xmiddle = False\n # Note that the user may specify a custom xmin and xmax which\n # flips the axis horizontally. Hence we need to check for both\n # the possibilities in the if statements below. Similar\n # comments hold for ymin and ymax.\n if xscale == 'log':\n if xmax > xmin:\n subplot.spines['right'].set_visible(False)\n subplot.spines['left'].set_position(('outward', 10))\n subplot.yaxis.set_ticks_position('left')\n subplot.yaxis.set_label_position('left')\n yaxis = 'left'\n elif xmax < xmin:\n subplot.spines['left'].set_visible(False)\n subplot.spines['right'].set_position(('outward', 10))\n subplot.yaxis.set_ticks_position('right')\n subplot.yaxis.set_label_position('right')\n yaxis = 'right'\n elif (xmin > 0 and xmax > xmin) or (xmax > 0 and xmin > xmax):\n subplot.spines['right'].set_visible(False)\n subplot.spines['left'].set_position(('outward', 10))\n subplot.yaxis.set_ticks_position('left')\n subplot.yaxis.set_label_position('left')\n yaxis = 'left'\n elif (xmax < 0 and xmax > xmin) or (xmin < 0 and xmin > xmax):\n subplot.spines['left'].set_visible(False)\n subplot.spines['right'].set_position(('outward', 10))\n subplot.yaxis.set_ticks_position('right')\n subplot.yaxis.set_label_position('right')\n yaxis = 
'right'\n else:\n subplot.spines['left'].set_position('zero')\n subplot.yaxis.set_ticks_position('left')\n subplot.yaxis.set_label_position('left')\n subplot.spines['right'].set_visible(False)\n ymiddle = True\n yaxis = 'left'\n\n if yscale == 'log':\n if ymax > ymin:\n subplot.spines['top'].set_visible(False)\n subplot.spines['bottom'].set_position(('outward', 10))\n subplot.xaxis.set_ticks_position('bottom')\n subplot.xaxis.set_label_position('bottom')\n xaxis = 'bottom'\n elif ymax < ymin:\n subplot.spines['bottom'].set_visible(False)\n subplot.spines['top'].set_position(('outward', 10))\n subplot.xaxis.set_ticks_position('top')\n subplot.xaxis.set_label_position('top')\n xaxis = 'top'\n elif (ymin > 0 and ymax > ymin) or (ymax > 0 and ymin > ymax):\n subplot.spines['top'].set_visible(False)\n subplot.spines['bottom'].set_position(('outward', 10))\n subplot.xaxis.set_ticks_position('bottom')\n subplot.xaxis.set_label_position('bottom')\n xaxis = 'bottom'\n elif (ymax < 0 and ymax > ymin) or (ymin < 0 and ymin > ymax):\n subplot.spines['bottom'].set_visible(False)\n subplot.spines['top'].set_position(('outward', 10))\n subplot.xaxis.set_ticks_position('top')\n subplot.xaxis.set_label_position('top')\n xaxis = 'top'\n else:\n subplot.spines['bottom'].set_position('zero')\n subplot.xaxis.set_ticks_position('bottom')\n subplot.xaxis.set_label_position('bottom')\n subplot.spines['top'].set_visible(False)\n xmiddle = True\n xaxis = 'bottom'\n\n # For now, set the formatter to the old one, since that is\n # sort of what we are used to. 
We should eventually look at\n # the default one to see if we like it better.\n\n (subplot, x_locator, y_locator,\n x_formatter, y_formatter) = self._matplotlib_tick_formatter(\n subplot, base=(basex, basey),\n locator_options=locator_options,\n scale=(xscale, yscale),\n tick_formatter=tick_formatter, ticks=ticks,\n xmax=xmax, xmin=xmin, ymax=ymax, ymin=ymin)\n\n # Make ticklines go on both sides of the axes\n # if xmiddle:\n # for t in subplot.xaxis.get_majorticklines():\n # t.set_marker(\"|\")\n # t.set_markersize(8)\n # for t in subplot.xaxis.get_minorticklines():\n # t.set_marker(\"|\")\n # t.set_markersize(4)\n\n # if ymiddle:\n # for t in subplot.yaxis.get_majorticklines():\n # t.set_marker(\"|\")\n # t.set_markersize(8)\n # for t in subplot.yaxis.get_minorticklines():\n # t.set_marker(\"|\")\n # t.set_markersize(4)\n\n # Make the zero tick labels disappear if the axes cross\n # inside the picture, but only if log scale is not used\n if (xmiddle and ymiddle and xscale == 'linear' == yscale):\n from sage.plot.plot import SelectiveFormatter\n subplot.yaxis.set_major_formatter(SelectiveFormatter(\n subplot.yaxis.get_major_formatter(), skip_values=[0]))\n subplot.xaxis.set_major_formatter(SelectiveFormatter(\n subplot.xaxis.get_major_formatter(), skip_values=[0]))\n\n else:\n for spine in subplot.spines.values():\n spine.set_visible(False)\n from matplotlib.ticker import NullFormatter, NullLocator\n subplot.xaxis.set_major_formatter(NullFormatter())\n subplot.yaxis.set_major_formatter(NullFormatter())\n subplot.xaxis.set_major_locator(NullLocator())\n subplot.yaxis.set_major_locator(NullLocator())\n\n if frame or axes:\n # Make minor tickmarks, unless we specify fixed ticks or no ticks\n # We do this change only on linear scale, otherwise matplotlib\n # errors out with a memory error.\n from matplotlib.ticker import (AutoMinorLocator, FixedLocator,\n LogLocator, NullLocator)\n if isinstance(x_locator, (NullLocator, FixedLocator)):\n 
subplot.xaxis.set_minor_locator(NullLocator())\n elif xscale == 'linear':\n subplot.xaxis.set_minor_locator(AutoMinorLocator())\n else: # log scale\n from sage.arith.srange import srange\n base_inv = 1.0 / basex\n subs = [float(_) for _ in srange(2 * base_inv, 1, base_inv)]\n subplot.xaxis.set_minor_locator(LogLocator(base=basex,\n subs=subs))\n if isinstance(y_locator, (NullLocator, FixedLocator)):\n subplot.yaxis.set_minor_locator(NullLocator())\n elif yscale == 'linear':\n subplot.yaxis.set_minor_locator(AutoMinorLocator())\n else: # log scale\n from sage.arith.srange import srange\n base_inv = 1.0 / basey\n subs = [float(_) for _ in srange(2 * base_inv, 1, base_inv)]\n subplot.yaxis.set_minor_locator(LogLocator(base=basey,\n subs=subs))\n # Set the color and fontsize of ticks\n subplot.tick_params(color=self._axes_color,\n labelcolor=self._tick_label_color,\n labelsize=self._fontsize, which='both')\n\n if gridlines is not None:\n if isinstance(gridlines, (list, tuple)):\n vgridlines, hgridlines = gridlines\n else:\n hgridlines = gridlines\n vgridlines = gridlines\n\n if gridlinesstyle is None:\n # Set up the default grid style\n gridlinesstyle = dict(color='black', linestyle=':',\n linewidth=0.5)\n\n vgridstyle = gridlinesstyle.copy()\n if vgridlinesstyle is not None:\n vgridstyle.update(vgridlinesstyle)\n\n hgridstyle = gridlinesstyle.copy()\n if hgridlinesstyle is not None:\n hgridstyle.update(hgridlinesstyle)\n\n if hgridlines == 'minor':\n hgridstyle['which'] = 'both'\n if vgridlines == 'minor':\n vgridstyle['which'] = 'both'\n\n if not isinstance(hgridlines, str) and isinstance(hgridlines, Iterable):\n hlines = iter(hgridlines)\n hgridstyle.pop(\"minor\", None)\n for hline in hlines:\n if isinstance(hline, (list, tuple)):\n hl, style = hline\n st = hgridstyle.copy()\n st.update(style)\n else:\n hl = hline\n st = hgridstyle\n subplot.axhline(hl, **st)\n else:\n if hgridlines not in (None, False):\n subplot.yaxis.grid(True, **hgridstyle)\n\n if not 
isinstance(vgridlines, str) and isinstance(vgridlines, Iterable):\n vlines = iter(vgridlines)\n vgridstyle.pop(\"minor\", None)\n for vline in vlines:\n if isinstance(vline, (list, tuple)):\n vl, style = vline\n st = vgridstyle.copy()\n st.update(style)\n else:\n vl = vline\n st = vgridstyle\n subplot.axvline(vl, **st)\n else:\n if vgridlines not in (None, False):\n subplot.xaxis.grid(True, **vgridstyle)\n\n if self._axes_labels is not None:\n label_options = {}\n label_options['color'] = self._axes_label_color\n label_options['size'] = int(self._axes_labels_size * self._fontsize)\n subplot.set_xlabel(self._axes_labels[0], **label_options)\n subplot.set_ylabel(self._axes_labels[1], **label_options)\n\n if axes is True and frame is False:\n # We set the label positions according to where we are\n # drawing the axes.\n if xaxis == 'bottom':\n yaxis_labely = subplot.get_ylim()[1]\n yaxis_labeloffset = 8\n yaxis_vert = 'bottom'\n xaxis_labely = 0\n xaxis_vert = 'baseline'\n else:\n yaxis_labely = subplot.get_ylim()[0]\n yaxis_labeloffset = -8\n yaxis_vert = 'top'\n xaxis_labely = 1\n xaxis_vert = 'top'\n\n if yaxis == 'left':\n xaxis_labelx = subplot.get_xlim()[1]\n xaxis_labeloffset = 8\n xaxis_horiz = 'left'\n yaxis_labelx = 0\n else:\n xaxis_labelx = subplot.get_xlim()[0]\n xaxis_labeloffset = -8\n xaxis_horiz = 'right'\n yaxis_labelx = 1\n\n from matplotlib.transforms import offset_copy\n xlabel = subplot.xaxis.get_label()\n xlabel.set_horizontalalignment(xaxis_horiz)\n xlabel.set_verticalalignment(xaxis_vert)\n trans = subplot.spines[xaxis].get_transform()\n labeltrans = offset_copy(trans, figure, x=xaxis_labeloffset,\n y=0, units='points')\n subplot.xaxis.set_label_coords(x=xaxis_labelx,\n y=xaxis_labely, transform=labeltrans)\n\n ylabel = subplot.yaxis.get_label()\n ylabel.set_horizontalalignment('center')\n ylabel.set_verticalalignment(yaxis_vert)\n ylabel.set_rotation('horizontal')\n trans = subplot.spines[yaxis].get_transform()\n labeltrans = 
offset_copy(trans, figure, x=0,\n y=yaxis_labeloffset, units='points')\n subplot.yaxis.set_label_coords(x=yaxis_labelx,\n y=yaxis_labely, transform=labeltrans)\n\n # This option makes the xlim and ylim limits not take effect\n # todo: figure out which limits were specified, and let the\n # free limits autoscale\n # subplot.autoscale_view(tight=True)\n if title is not None:\n if title_pos is not None:\n if (not isinstance(title_pos, (list, tuple)) or\n len(title_pos) != 2):\n raise ValueError(\"'title_pos' must be a list or tuple \"\n \"of two real numbers.\")\n title_pos = (float(title_pos[0]), float(title_pos[1]))\n\n if (frame) or (axes_labels is None):\n if title_pos is not None:\n subplot.set_title(title, fontsize=fontsize,\n position=title_pos)\n else:\n subplot.set_title(title, fontsize=fontsize)\n else:\n # frame is false axes is not None, and neither is axes_labels\n # Then, the title is moved up to avoid overlap with axes labels\n if title_pos is None:\n title_pos = (0.5, 1.05)\n subplot.set_title(title, fontsize=fontsize, position=title_pos)\n\n for g in self._objects:\n g.set_options(old_opts[g])\n\n return figure\n\n def save_image(self, filename=None, *args, **kwds):\n r\"\"\"\n Save an image representation of self.\n\n The image type is determined by the extension of the filename.\n For example, this could be ``.png``, ``.jpg``, ``.gif``,\n ``.pdf``, ``.svg``. Currently this is implemented by calling\n the :meth:`save` method of self, passing along all arguments\n and keywords.\n\n .. NOTE::\n\n Not all image types are necessarily implemented for all\n graphics types. 
See :meth:`save` for more details.\n\n EXAMPLES::\n\n sage: c = circle((1,1), 1, color='red')\n sage: filename = os.path.join(SAGE_TMP, 'test.png')\n sage: c.save_image(filename, xmin=-1, xmax=3, ymin=-1, ymax=3)\n \"\"\"\n self.save(filename, *args, **kwds)\n\n # filename argument is written explicitly so that it can be used as a\n # positional one, which is a very likely usage for this function.\n\n @suboptions('legend', **LEGEND_OPTIONS)\n def save(self, filename, **kwds):\n r\"\"\"\n Save the graphics to an image file.\n\n INPUT:\n\n - ``filename`` -- string. The filename and the image format\n given by the extension, which can be one of the following:\n\n * ``.eps``,\n\n * ``.pdf``,\n\n * ``.pgf``,\n\n * ``.png``,\n\n * ``.ps``,\n\n * ``.sobj`` (for a Sage object you can load later),\n\n * ``.svg``,\n\n * empty extension will be treated as ``.sobj``.\n\n All other keyword arguments will be passed to the plotter.\n\n OUTPUT:\n\n - none.\n\n EXAMPLES::\n\n sage: c = circle((1,1), 1, color='red')\n sage: filename = os.path.join(SAGE_TMP, 'test.png')\n sage: c.save(filename, xmin=-1, xmax=3, ymin=-1, ymax=3)\n\n To make a figure bigger or smaller, use ``figsize``::\n\n sage: c.save(filename, figsize=5, xmin=-1, xmax=3, ymin=-1, ymax=3)\n\n By default, the figure grows to include all of the graphics and text,\n so the final image may not be exactly the figure size you specified.\n If you want a figure to be exactly a certain size, specify the keyword\n ``fig_tight=False``::\n\n sage: c.save(filename, figsize=[8,4], fig_tight=False,\n ....: xmin=-1, xmax=3, ymin=-1, ymax=3)\n\n You can also pass extra options to the plot command instead of this\n method, e.g. 
::\n\n sage: plot(x^2 - 5, (x, 0, 5), ymin=0).save(tmp_filename(ext='.png'))\n\n will save the same plot as the one shown by this command::\n\n sage: plot(x^2 - 5, (x, 0, 5), ymin=0)\n Graphics object consisting of 1 graphics primitive\n\n (This test verifies that :trac:`8632` is fixed.)\n\n TESTS:\n\n Legend labels should save correctly::\n\n sage: P = plot(x,(x,0,1),legend_label='$xyz$')\n sage: P.set_legend_options(back_color=(1,0,0))\n sage: P.set_legend_options(loc=7)\n sage: filename=os.path.join(SAGE_TMP, 'test.png')\n sage: P.save(filename)\n\n This plot should save with the frame shown, showing :trac:`7524`\n is fixed (same issue as :trac:`7981` and :trac:`8632`)::\n\n sage: var('x,y')\n (x, y)\n sage: a = plot_vector_field((x,-y),(x,-1,1),(y,-1,1))\n sage: filename=os.path.join(SAGE_TMP, 'test2.png')\n sage: a.save(filename)\n\n The following plot should show the axes; fixes :trac:`14782` ::\n\n sage: plot(x^2, (x, 1, 2), ticks=[[], []])\n Graphics object consisting of 1 graphics primitive\n\n \"\"\"\n options = dict()\n options.update(self.SHOW_OPTIONS)\n options.update(self._extra_kwds)\n options.update(kwds)\n dpi = options.pop('dpi')\n transparent = options.pop('transparent')\n fig_tight = options.pop('fig_tight')\n\n ext = os.path.splitext(filename)[1].lower()\n\n if ext in ['', '.sobj']:\n SageObject.save(self, filename)\n elif ext not in ALLOWED_EXTENSIONS:\n raise ValueError(\"allowed file extensions for images are '\" +\n \"', '\".join(ALLOWED_EXTENSIONS) + \"'!\")\n else:\n from matplotlib import rcParams\n rc_backup = (rcParams['ps.useafm'], rcParams['pdf.use14corefonts'],\n rcParams['text.usetex']) # save the rcParams\n figure = self.matplotlib(**options)\n # You can output in PNG, PS, EPS, PDF, PGF, or SVG format, depending\n # on the file extension.\n # PGF is handled by a different backend\n if ext == '.pgf':\n from sage.features.latex import xelatex, pdflatex, lualatex\n latex_implementations = []\n if xelatex().is_present():\n 
latex_implementations.append('xelatex')\n if pdflatex().is_present():\n latex_implementations.append('pdflatex')\n if lualatex().is_present():\n latex_implementations.append('lualatex')\n if not latex_implementations:\n raise ValueError(\"Matplotlib requires either xelatex, \"\n \"lualatex, or pdflatex.\")\n if latex_implementations[0] == \"pdflatex\":\n # use pdflatex and set font encoding as per\n # matplotlib documentation:\n # https://matplotlib.org/users/pgf.html#pgf-tutorial\n pgf_options = {\"pgf.texsystem\": \"pdflatex\",\n \"pgf.preamble\": [\n r\"\\usepackage[utf8x]{inputenc}\",\n r\"\\usepackage[T1]{fontenc}\"]}\n else:\n pgf_options = {\n \"pgf.texsystem\": latex_implementations[0],\n }\n from matplotlib import rcParams\n rcParams.update(pgf_options)\n from matplotlib.backends.backend_pgf import FigureCanvasPgf\n figure.set_canvas(FigureCanvasPgf(figure))\n\n # matplotlib looks at the file extension to see what the renderer should be.\n # The default is FigureCanvasAgg for PNG's because this is by far the most\n # common type of files rendered, like in the notebook, for example.\n # if the file extension is not '.png', then matplotlib will handle it.\n else:\n from matplotlib.backends.backend_agg import FigureCanvasAgg\n figure.set_canvas(FigureCanvasAgg(figure))\n # this messes up the aspect ratio!\n # figure.canvas.mpl_connect('draw_event', pad_for_tick_labels)\n\n # tight_layout adjusts the *subplot* parameters so ticks aren't cut off, etc.\n figure.tight_layout()\n\n opts = dict(dpi=dpi, transparent=transparent)\n if fig_tight is True:\n opts['bbox_inches'] = 'tight'\n if self._bbox_extra_artists:\n opts['bbox_extra_artists'] = self._bbox_extra_artists\n\n figure.savefig(filename, **opts)\n\n # Restore the rcParams to the original, possibly user-set values\n (rcParams['ps.useafm'], rcParams['pdf.use14corefonts'],\n rcParams['text.usetex']) = rc_backup\n\n def _latex_(self, **kwds):\n \"\"\"\n Return a string plotting ``self`` with PGF.\n\n 
INPUT:\n\n All keyword arguments will be passed to the plotter.\n\n OUTPUT:\n\n A string of PGF commands to plot ``self``\n\n EXAMPLES::\n\n sage: L = line([(0,0), (1,1)], axes=False)\n sage: L._latex_() # not tested\n '%% Creator: Matplotlib, PGF backend...\n \"\"\"\n tmpfilename = tmp_filename(ext='.pgf')\n self.save(filename=tmpfilename, **kwds)\n with open(tmpfilename, \"r\") as tmpfile:\n latex_list = tmpfile.readlines()\n from sage.misc.latex import latex\n latex.add_package_to_preamble_if_available('pgf')\n return ''.join(latex_list)\n\n def description(self):\n r\"\"\"\n Print a textual description to stdout.\n\n This method is mostly used for doctests.\n\n EXAMPLES::\n\n sage: print(polytopes.hypercube(2).plot().description())\n Polygon defined by 4 points: [(-1.0, -1.0), (1.0, -1.0), (1.0, 1.0), (-1.0, 1.0)]\n Line defined by 2 points: [(-1.0, 1.0), (-1.0, -1.0)]\n Line defined by 2 points: [(1.0, -1.0), (-1.0, -1.0)]\n Line defined by 2 points: [(1.0, -1.0), (1.0, 1.0)]\n Line defined by 2 points: [(1.0, 1.0), (-1.0, 1.0)]\n Point set defined by 4 point(s): [(1.0, -1.0), (1.0, 1.0), (-1.0, 1.0), (-1.0, -1.0)]\n \"\"\"\n data = []\n for g in self:\n g_zorder = g.options().get('zorder', 0)\n if hasattr(g, 'xdata'):\n g_str = '{0}:\\t{1}'.format(g, list(zip(g.xdata, g.ydata)))\n else:\n g_str = repr(g)\n data.append([g_zorder, g_str, g])\n data.sort()\n return '\\n'.join(g[1] for g in data)\n\n def inset(self, graphics, pos=None, fontsize=None):\n r\"\"\"\n Add a graphics object as an inset.\n\n INPUT:\n\n - ``graphics`` -- the graphics object (instance of :class:`Graphics`)\n to be added as an inset to the current graphics\n\n - ``pos`` -- (default: ``None``) 4-tuple\n ``(left, bottom, width, height)``\n specifying the location and size of the inset on the final figure,\n all quantities being in fractions of the figure width and height; if\n ``None``, the value ``(0.7, 0.7, 0.2, 0.2)`` is used\n\n - ``fontsize`` -- (default: ``None``) integer, font size 
(in points)\n for the inset; if ``None``, the value of 6 points is used, unless\n ``fontsize`` has been explicitly set in the construction of\n ``graphics`` (in this case, it is not overwritten here)\n\n OUTPUT:\n\n - instance of :class:`~sage.plot.multigraphics.MultiGraphics`\n\n EXAMPLES::\n\n sage: f(x) = x^2*sin(1/x)\n sage: g1 = plot(f(x), (x, -2, 2), axes_labels=['$x$', '$y$'])\n sage: g2 = plot(f(x), (x, -0.3, 0.3), axes_labels=['$x$', '$y$'],\n ....: frame=True)\n sage: g1.inset(g2)\n Multigraphics with 2 elements\n\n .. PLOT::\n\n f = (x**2*sin(1/x)).function(x)\n g1 = plot(f(x), (x, -2, 2), axes_labels=['$x$', '$y$'])\n g2 = plot(f(x), (x, -0.3, 0.3), axes_labels=['$x$', '$y$'], \\\n frame=True)\n sphinx_plot(g1.inset(g2))\n\n Using non-default values for the position/size and the font size::\n\n sage: g1.inset(g2, pos=(0.15, 0.7, 0.25, 0.25), fontsize=8)\n Multigraphics with 2 elements\n\n .. PLOT::\n\n f = (x**2*sin(1/x)).function(x)\n g1 = plot(f(x), (x, -2, 2), axes_labels=['$x$', '$y$'])\n g2 = plot(f(x), (x, -0.3, 0.3), axes_labels=['$x$', '$y$'], \\\n frame=True)\n sphinx_plot(g1.inset(g2, pos=(0.15, 0.7, 0.25, 0.25), fontsize=8))\n\n We can add another inset by invoking ``inset`` on the last output::\n\n sage: g1g2 = _\n sage: g3 = plot(f(x), (x, -0.05, 0.05), axes_labels=['$x$', '$y$'],\n ....: frame=True)\n sage: g1g2.inset(g3, pos=(0.65, 0.12, 0.25, 0.25))\n Multigraphics with 3 elements\n\n .. 
PLOT::\n\n f = (x**2*sin(1/x)).function(x)\n g1 = plot(f(x), (x, -2, 2), axes_labels=['$x$', '$y$'])\n g2 = plot(f(x), (x, -0.3, 0.3), axes_labels=['$x$', '$y$'], \\\n frame=True)\n g1g2 = g1.inset(g2, pos=(0.15, 0.7, 0.25, 0.25), fontsize=8)\n g3 = plot(f(x), (x, -0.05, 0.05), axes_labels=['$x$', '$y$'], \\\n frame=True)\n sphinx_plot(g1g2.inset(g3, pos=(0.65, 0.12, 0.25, 0.25)))\n\n \"\"\"\n from .multigraphics import MultiGraphics\n if pos is None:\n pos = (0.7, 0.7, 0.2, 0.2)\n pos0 = (0.05, 0.05, 0.9, 0.9)\n if fontsize is not None:\n graphics._extra_kwds['fontsize'] = fontsize\n elif 'fontsize' not in graphics._extra_kwds:\n graphics._extra_kwds['fontsize'] = 6\n return MultiGraphics([(self, pos0), (graphics, pos)])\n\n\n# Deprecation notice for GraphicsArray import\ndef GraphicsArray(*args, **kwargs):\n r\"\"\"\n This is deprecated (see :trac:`28675`).\n Use :class:`sage.plot.multigraphics.GraphicsArray` instead.\n\n TESTS::\n\n sage: from sage.plot.graphics import GraphicsArray\n sage: c = circle((0,0), 1)\n sage: G = GraphicsArray([c, c])\n doctest:...: DeprecationWarning: GraphicsArray must be imported from sage.plot.multigraphics and no longer from sage.plot.graphics.\n See https://trac.sagemath.org/28675 for details.\n sage: G\n Graphics Array of size 1 x 2\n\n \"\"\"\n from .multigraphics import GraphicsArray as NewGraphicsArray\n from sage.misc.superseded import deprecation\n deprecation(28675, \"GraphicsArray must be imported from \"\n \"sage.plot.multigraphics and no longer from \"\n \"sage.plot.graphics.\")\n return NewGraphicsArray(*args, **kwargs)\n"
] |
[
[
"matplotlib.backends.backend_pgf.FigureCanvasPgf",
"matplotlib.ticker.FixedFormatter",
"matplotlib.figure.Figure",
"matplotlib.backends.backend_agg.FigureCanvasAgg",
"matplotlib.ticker.AutoMinorLocator",
"matplotlib.ticker.NullFormatter",
"matplotlib.ticker.LogLocator",
"matplotlib.transforms.offset_copy",
"matplotlib.rcParams.update",
"matplotlib.ticker.MaxNLocator",
"matplotlib.ticker.ScalarFormatter",
"matplotlib.ticker.FixedLocator",
"matplotlib.ticker.NullLocator",
"matplotlib.pyplot.style.use",
"matplotlib.ticker.LogFormatterMathtext"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
felipeblazing/cudf
|
[
"ccc6883c5ec1cdd1f5070f0bba561587f3bd701e",
"ccc6883c5ec1cdd1f5070f0bba561587f3bd701e"
] |
[
"python/cudf/tests/test_dataframe.py",
"python/cudf/dataframe/index.py"
] |
[
"# Copyright (c) 2018, NVIDIA CORPORATION.\n\nimport pytest\n\nimport numpy as np\nimport pandas as pd\nimport pyarrow as pa\n\nfrom librmm_cffi import librmm as rmm\n\nimport cudf as gd\nfrom cudf.dataframe.dataframe import Series, DataFrame\nfrom cudf.dataframe.buffer import Buffer\nfrom cudf.settings import set_options\n\nfrom itertools import combinations\n\nfrom . import utils\n\n\ndef test_buffer_basic():\n n = 10\n buf = Buffer(np.arange(n, dtype=np.float64))\n assert buf.size == n\n assert buf.capacity == n\n np.testing.assert_equal(buf.mem.copy_to_host(),\n np.arange(n, dtype=np.float64))\n\n\ndef test_buffer_append():\n n = 10\n expected = np.arange(n, dtype=np.float64)\n buf = Buffer(expected, size=n - 4, capacity=n)\n assert buf.size == n - 4\n assert buf.capacity == n\n np.testing.assert_equal(buf.mem.copy_to_host(), expected)\n np.testing.assert_equal(buf.to_array(), np.arange(n - 4, dtype=np.float64))\n\n # Buffer.append\n buf.append(1.23)\n expected[n - 4] = 1.23\n np.testing.assert_equal(buf.mem.copy_to_host(), expected)\n assert buf.size == n - 3\n assert buf.capacity == n\n\n # Buffer.extend\n buf.extend(np.asarray([2, 3]))\n expected[n - 3] = 2\n expected[n - 2] = 3\n np.testing.assert_equal(buf.mem.copy_to_host(), expected)\n assert buf.size == n - 1\n assert buf.capacity == n\n\n # Test out-of-bound\n with pytest.raises(MemoryError):\n buf.extend(np.asarray([2, 3]))\n np.testing.assert_equal(buf.mem.copy_to_host(), expected)\n assert buf.size == n - 1\n assert buf.capacity == n\n\n # Append to last slot\n buf.append(10.125)\n expected[n - 1] = 10.125\n np.testing.assert_equal(buf.mem.copy_to_host(), expected)\n assert buf.size == n\n assert buf.capacity == n\n\n with pytest.raises(MemoryError):\n buf.append(987654)\n\n np.testing.assert_equal(buf.to_array(), expected)\n assert buf.size == n\n assert buf.capacity == n\n\n\ndef test_series_basic():\n # Make series from buffer\n a1 = np.arange(10, dtype=np.float64)\n series = Series(a1)\n assert 
len(series) == 10\n np.testing.assert_equal(series.to_array(), np.hstack([a1]))\n\n # Add new buffer\n a2 = np.arange(5)\n series = series.append(a2)\n assert len(series) == 15\n np.testing.assert_equal(series.to_array(), np.hstack([a1, a2]))\n\n # Ensure appending to previous buffer\n a3 = np.arange(3)\n series = series.append(a3)\n assert len(series) == 18\n a4 = np.hstack([a1, a2, a3])\n np.testing.assert_equal(series.to_array(), a4)\n\n\ndef test_series_indexing():\n a1 = np.arange(20)\n series = Series(a1)\n # Indexing\n sr1 = series[:12]\n assert sr1.null_count == 0\n np.testing.assert_equal(sr1.to_array(), a1[:12])\n sr2 = sr1[3:]\n assert sr2.null_count == 0\n np.testing.assert_equal(sr2.to_array(), a1[3:12])\n # Index with stride\n sr3 = sr2[::2]\n assert sr3.null_count == 0\n np.testing.assert_equal(sr3.to_array(), a1[3:12:2])\n\n\ndef test_dataframe_basic():\n np.random.seed(0)\n df = DataFrame()\n\n # Populate with cuda memory\n df['keys'] = rmm.to_device(np.arange(10, dtype=np.float64))\n np.testing.assert_equal(df['keys'].to_array(), np.arange(10))\n assert len(df) == 10\n\n # Populate with numpy array\n rnd_vals = np.random.random(10)\n df['vals'] = rnd_vals\n np.testing.assert_equal(df['vals'].to_array(), rnd_vals)\n assert len(df) == 10\n assert tuple(df.columns) == ('keys', 'vals')\n\n # Make another dataframe\n df2 = DataFrame()\n df2['keys'] = np.array([123], dtype=np.float64)\n df2['vals'] = np.array([321], dtype=np.float64)\n\n # Concat\n df = gd.concat([df, df2])\n assert len(df) == 11\n\n hkeys = np.asarray(np.arange(10, dtype=np.float64).tolist() + [123])\n hvals = np.asarray(rnd_vals.tolist() + [321])\n\n np.testing.assert_equal(df['keys'].to_array(), hkeys)\n np.testing.assert_equal(df['vals'].to_array(), hvals)\n\n # As matrix\n mat = df.as_matrix()\n\n expect = np.vstack([hkeys, hvals]).T\n\n print(expect)\n print(mat)\n np.testing.assert_equal(mat, expect)\n\n\ndef test_dataframe_column_name_indexing():\n df = DataFrame()\n data = 
np.asarray(range(10), dtype=np.int32)\n df['a'] = data\n df[1] = data\n np.testing.assert_equal(df['a'].to_array(),\n np.asarray(range(10), dtype=np.int32))\n np.testing.assert_equal(df[1].to_array(),\n np.asarray(range(10), dtype=np.int32))\n\n pdf = pd.DataFrame()\n nelem = 10\n pdf['key1'] = np.random.randint(0, 5, nelem)\n pdf['key2'] = np.random.randint(0, 3, nelem)\n pdf[1] = np.arange(1, 1 + nelem)\n pdf[2] = np.random.random(nelem)\n df = DataFrame.from_pandas(pdf)\n for i in range(1, len(pdf.columns)+1):\n for idx in combinations(pdf.columns, i):\n assert(pdf[list(idx)].equals(df[list(idx)].to_pandas()))\n\n\ndef test_dataframe_column_add_drop():\n df = DataFrame()\n data = np.asarray(range(10))\n df['a'] = data\n df['b'] = data\n assert tuple(df.columns) == ('a', 'b')\n del df['a']\n assert tuple(df.columns) == ('b',)\n df['c'] = data\n assert tuple(df.columns) == ('b', 'c')\n df['a'] = data\n assert tuple(df.columns) == ('b', 'c', 'a')\n\n\[email protected]('nelem', [0, 3, 100, 1000])\ndef test_dataframe_astype(nelem):\n df = DataFrame()\n data = np.asarray(range(nelem), dtype=np.int32)\n df['a'] = data\n assert df['a'].dtype is np.dtype(np.int32)\n df['b'] = df['a'].astype(np.float32)\n assert df['b'].dtype is np.dtype(np.float32)\n np.testing.assert_equal(df['a'].to_array(), df['b'].to_array())\n\n\ndef test_dataframe_slicing():\n df = DataFrame()\n size = 123\n df['a'] = ha = np.random.randint(low=0, high=100, size=size)\\\n .astype(np.int32)\n df['b'] = hb = np.random.random(size).astype(np.float32)\n df['c'] = hc = np.random.randint(low=0, high=100, size=size)\\\n .astype(np.int64)\n df['d'] = hd = np.random.random(size).astype(np.float64)\n\n # Row slice first 10\n first_10 = df[:10]\n assert len(first_10) == 10\n assert tuple(first_10.columns) == ('a', 'b', 'c', 'd')\n np.testing.assert_equal(first_10['a'].to_array(), ha[:10])\n np.testing.assert_equal(first_10['b'].to_array(), hb[:10])\n np.testing.assert_equal(first_10['c'].to_array(), 
hc[:10])\n np.testing.assert_equal(first_10['d'].to_array(), hd[:10])\n del first_10\n\n # Row slice last 10\n last_10 = df[-10:]\n assert len(last_10) == 10\n assert tuple(last_10.columns) == ('a', 'b', 'c', 'd')\n np.testing.assert_equal(last_10['a'].to_array(), ha[-10:])\n np.testing.assert_equal(last_10['b'].to_array(), hb[-10:])\n np.testing.assert_equal(last_10['c'].to_array(), hc[-10:])\n np.testing.assert_equal(last_10['d'].to_array(), hd[-10:])\n del last_10\n\n # Row slice [begin:end]\n begin = 7\n end = 121\n subrange = df[begin:end]\n assert len(subrange) == end - begin\n assert tuple(subrange.columns) == ('a', 'b', 'c', 'd')\n np.testing.assert_equal(subrange['a'].to_array(), ha[begin:end])\n np.testing.assert_equal(subrange['b'].to_array(), hb[begin:end])\n np.testing.assert_equal(subrange['c'].to_array(), hc[begin:end])\n np.testing.assert_equal(subrange['d'].to_array(), hd[begin:end])\n del subrange\n\n\ndef test_dataframe_loc():\n df = DataFrame()\n size = 123\n df['a'] = ha = np.random.randint(low=0, high=100, size=size)\\\n .astype(np.int32)\n df['b'] = hb = np.random.random(size).astype(np.float32) # noqa: F841\n df['c'] = hc = np.random.randint(low=0, high=100, size=size)\\\n .astype(np.int64)\n df['d'] = hd = np.random.random(size).astype(np.float64)\n\n # Full slice\n full = df.loc[:, ['c']]\n assert tuple(full.columns) == ('c',)\n np.testing.assert_equal(full['c'].to_array(), hc)\n\n begin = 117\n end = 122\n fewer = df.loc[begin:end, ['c', 'd', 'a']]\n assert len(fewer) == end - begin + 1\n assert tuple(fewer.columns) == ('c', 'd', 'a')\n np.testing.assert_equal(fewer['a'].to_array(), ha[begin:end + 1])\n np.testing.assert_equal(fewer['c'].to_array(), hc[begin:end + 1])\n np.testing.assert_equal(fewer['d'].to_array(), hd[begin:end + 1])\n del fewer\n\n # Make int64 index\n offset = 50\n df2 = df[offset:]\n begin = 117\n end = 122\n fewer = df2.loc[begin:end, ['c', 'd', 'a']]\n assert len(fewer) == end - begin + 1\n assert 
tuple(fewer.columns) == ('c', 'd', 'a')\n np.testing.assert_equal(fewer['a'].to_array(), ha[begin:end + 1])\n np.testing.assert_equal(fewer['c'].to_array(), hc[begin:end + 1])\n np.testing.assert_equal(fewer['d'].to_array(), hd[begin:end + 1])\n\n\ndef test_dataframe_to_string():\n with set_options(formatting={'nrows': 5, 'ncols': 8}):\n # Test basic\n df = DataFrame([('a', [1, 2, 3, 4, 5, 6]),\n ('b', [11, 12, 13, 14, 15, 16])])\n string = str(df)\n print(string)\n assert string.splitlines()[-1] == '[1 more rows]'\n\n # Test skipped columns\n df = DataFrame([('a', [1, 2, 3, 4, 5, 6]),\n ('b', [11, 12, 13, 14, 15, 16]),\n ('c', [11, 12, 13, 14, 15, 16]),\n ('d', [11, 12, 13, 14, 15, 16])])\n string = df.to_string(ncols=3)\n print(string)\n assert string.splitlines()[-2] == '[1 more rows]'\n assert string.splitlines()[-1] == '[1 more columns]'\n\n # Test masked\n df = DataFrame([('a', [1, 2, 3, 4, 5, 6]),\n ('b', [11, 12, 13, 14, 15, 16])])\n\n data = np.arange(6)\n mask = np.zeros(1, dtype=np.uint8)\n mask[0] = 0b00101101\n\n masked = Series.from_masked_array(data, mask)\n assert masked.null_count == 2\n df['c'] = masked\n\n # check data\n values = list(masked)\n validids = [0, 2, 3, 5]\n densearray = masked.to_array()\n np.testing.assert_equal(data[validids], densearray)\n # valid position is corret\n for i in validids:\n assert data[i] == values[i]\n # null position is correct\n for i in range(len(values)):\n if i not in validids:\n assert values[i] is None\n\n got = df.to_string(nrows=None)\n print(got)\n expect = '''\n a b c\n0 1 11 0\n1 2 12\n2 3 13 2\n3 4 14 3\n4 5 15\n5 6 16 5\n'''\n # values should match despite whitespace difference\n assert got.split() == expect.split()\n\n\ndef test_dataframe_to_string_wide():\n # Test basic\n df = DataFrame()\n for i in range(100):\n df['a{}'.format(i)] = list(range(3))\n got = df.to_string(ncols=8)\n print(got)\n expect = '''\n a0 a1 a2 a3 a4 a5 a6 ... a99\n0 0 0 0 0 0 0 0 ... 0\n1 1 1 1 1 1 1 1 ... 
1\n2 2 2 2 2 2 2 2 ... 2\n[92 more columns]\n'''\n # values should match despite whitespace difference\n assert got.split() == expect.split()\n\n\ndef test_dataframe_empty_to_string():\n # Test for printing empty dataframe\n df = DataFrame()\n got = df.to_string()\n print(got)\n expect = \"Empty DataFrame\\nColumns: []\\nIndex: []\\n\"\n # values should match despite whitespace difference\n assert got.split() == expect.split()\n\n\ndef test_dataframe_emptycolumns_to_string():\n # Test for printing dataframe having empty columns\n df = DataFrame()\n df['a'] = []\n df['b'] = []\n got = df.to_string()\n print(got)\n expect = \"Empty DataFrame\\nColumns: ['a', 'b']\\nIndex: []\\n\"\n # values should match despite whitespace difference\n assert got.split() == expect.split()\n\n\ndef test_dataframe_copy():\n # Test for copying the dataframe using python copy pkg\n from copy import copy\n df = DataFrame()\n df['a'] = [1, 2, 3]\n df2 = copy(df)\n df2['b'] = [4, 5, 6]\n got = df.to_string()\n print(got)\n expect = '''\n a\n0 1\n1 2\n2 3\n'''\n # values should match despite whitespace difference\n assert got.split() == expect.split()\n\n\ndef test_dataframe_copy_shallow():\n # Test for copy dataframe using class method\n df = DataFrame()\n df['a'] = [1, 2, 3]\n df2 = df.copy()\n df2['b'] = [4, 2, 3]\n got = df.to_string()\n print(got)\n expect = '''\n a\n0 1\n1 2\n2 3\n'''\n # values should match despite whitespace difference\n assert got.split() == expect.split()\n\n\ndef test_dataframe_dtypes():\n dtypes = pd.Series([np.int32, np.float32, np.float64],\n index=['c', 'a', 'b'])\n df = DataFrame([(k, np.ones(10, dtype=v))\n for k, v in dtypes.iteritems()])\n assert df.dtypes.equals(dtypes)\n\n\ndef test_dataframe_dir_and_getattr():\n df = DataFrame([('a', np.ones(10)),\n ('b', np.ones(10)),\n ('not an id', np.ones(10)),\n ('oop$', np.ones(10))])\n o = dir(df)\n assert {'a', 'b'}.issubset(o)\n assert 'not an id' not in o\n assert 'oop$' not in o\n\n # Getattr works\n assert 
df.a is df['a']\n assert df.b is df['b']\n with pytest.raises(AttributeError):\n df.not_a_column\n\n\[email protected]('order', ['C', 'F'])\ndef test_dataframe_as_gpu_matrix(order):\n df = DataFrame()\n\n nelem = 123\n for k in 'abcd':\n df[k] = np.random.random(nelem)\n\n # Check all columns\n mat = df.as_gpu_matrix(order=order).copy_to_host()\n assert mat.shape == (nelem, 4)\n for i, k in enumerate(df.columns):\n np.testing.assert_array_equal(df[k].to_array(), mat[:, i])\n\n # Check column subset\n mat = df.as_gpu_matrix(order=order, columns=['a', 'c']).copy_to_host()\n assert mat.shape == (nelem, 2)\n\n for i, k in enumerate('ac'):\n np.testing.assert_array_equal(df[k].to_array(), mat[:, i])\n\n\ndef test_dataframe_as_gpu_matrix_null_values():\n df = DataFrame()\n\n nelem = 123\n na = -10000\n\n refvalues = {}\n for k in 'abcd':\n df[k] = data = np.random.random(nelem)\n bitmask = utils.random_bitmask(nelem)\n df[k] = df[k].set_mask(bitmask)\n boolmask = np.asarray(utils.expand_bits_to_bytes(bitmask)[:nelem],\n dtype=np.bool_)\n data[~boolmask] = na\n refvalues[k] = data\n\n # Check null value causes error\n with pytest.raises(ValueError) as raises:\n df.as_gpu_matrix()\n raises.match(\"column 'a' has null values\")\n\n for k in df.columns:\n df[k] = df[k].fillna(na)\n\n mat = df.as_gpu_matrix().copy_to_host()\n for i, k in enumerate(df.columns):\n np.testing.assert_array_equal(refvalues[k], mat[:, i])\n\n\[email protected]('ntake', [0, 1, 10, 123, 122, 200])\ndef test_dataframe_take(ntake):\n np.random.seed(0)\n df = DataFrame()\n\n nelem = 123\n df['ii'] = ii = np.random.randint(0, 20, nelem)\n df['ff'] = ff = np.random.random(nelem)\n\n take_indices = np.random.randint(0, len(df), ntake)\n\n def check(**kwargs):\n out = df.take(take_indices, **kwargs)\n assert len(out) == ntake\n np.testing.assert_array_equal(out.ii.to_array(), ii[take_indices])\n np.testing.assert_array_equal(out.ff.to_array(), ff[take_indices])\n if kwargs.get('ignore_index'):\n 
np.testing.assert_array_equal(out.index, np.arange(ntake))\n else:\n np.testing.assert_array_equal(out.index, take_indices)\n\n check()\n check(ignore_index=True)\n\n\ndef test_dataframe_append_empty():\n pdf = pd.DataFrame({\n \"key\": [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4],\n \"value\": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n })\n gdf = DataFrame.from_pandas(pdf)\n\n gdf['newcol'] = 100\n pdf['newcol'] = 100\n\n assert len(gdf['newcol']) == len(pdf)\n assert len(pdf['newcol']) == len(pdf)\n pd.testing.assert_frame_equal(gdf.to_pandas(), pdf)\n\n\ndef test_dataframe_setitem_from_masked_object():\n ary = np.random.randn(100)\n mask = np.zeros(100, dtype=bool)\n mask[:20] = True\n np.random.shuffle(mask)\n ary[mask] = np.nan\n\n test1 = Series(ary)\n assert(test1.has_null_mask)\n assert(test1.null_count == 20)\n\n test2 = DataFrame.from_pandas(pd.DataFrame({'a': ary}))\n assert(test2['a'].has_null_mask)\n assert(test2['a'].null_count == 20)\n\n gpu_ary = rmm.to_device(ary)\n test3 = Series(gpu_ary)\n assert(test3.has_null_mask)\n assert(test3.null_count == 20)\n\n test4 = DataFrame()\n lst = [1, 2, None, 4, 5, 6, None, 8, 9]\n test4['lst'] = lst\n assert(test4['lst'].has_null_mask)\n assert(test4['lst'].null_count == 2)\n\n\ndef test_dataframe_append_to_empty():\n pdf = pd.DataFrame()\n pdf['a'] = []\n pdf['b'] = [1, 2, 3]\n\n gdf = DataFrame()\n gdf['a'] = []\n gdf['b'] = [1, 2, 3]\n\n pd.testing.assert_frame_equal(gdf.to_pandas(), pdf)\n\n\ndef test_dataframe_setitem_index_len1():\n gdf = DataFrame()\n gdf['a'] = [1]\n gdf['b'] = gdf.index.as_column()\n\n np.testing.assert_equal(gdf.b.to_array(), [0])\n\n\ndef test_assign():\n gdf = DataFrame({'x': [1, 2, 3]})\n gdf2 = gdf.assign(y=gdf.x + 1)\n assert list(gdf.columns) == ['x']\n assert list(gdf2.columns) == ['x', 'y']\n\n np.testing.assert_equal(gdf2.y.to_array(), [2, 3, 4])\n\n\[email protected]('nrows', [1, 8, 100, 1000])\ndef test_dataframe_hash_columns(nrows):\n gdf = DataFrame()\n data = 
np.asarray(range(nrows))\n data[0] = data[-1] # make first and last the same\n gdf['a'] = data\n gdf['b'] = gdf.a + 100\n out = gdf.hash_columns(['a', 'b'])\n assert isinstance(out, Series)\n assert len(out) == nrows\n assert out.dtype == np.int32\n\n # Check default\n out_all = gdf.hash_columns()\n np.testing.assert_array_equal(out.to_array(), out_all.to_array())\n\n # Check single column\n out_one = gdf.hash_columns(['a']).to_array()\n # First matches last\n assert out_one[0] == out_one[-1]\n # Equivalent to the Series.hash_values()\n np.testing.assert_array_equal(\n gdf.a.hash_values().to_array(),\n out_one,\n )\n\n\[email protected]('nrows', [3, 10, 100, 1000])\[email protected]('nparts', [1, 2, 8, 13])\[email protected]('nkeys', [1, 2])\ndef test_dataframe_hash_partition(nrows, nparts, nkeys):\n np.random.seed(123)\n gdf = DataFrame()\n keycols = []\n for i in range(nkeys):\n keyname = 'key{}'.format(i)\n gdf[keyname] = np.random.randint(0, 7 - i, nrows)\n keycols.append(keyname)\n gdf['val1'] = np.random.randint(0, nrows * 2, nrows)\n\n got = gdf.partition_by_hash(keycols, nparts=nparts)\n # Must return a list\n assert isinstance(got, list)\n # Must have correct number of partitions\n assert len(got) == nparts\n # All partitions must be DataFrame type\n assert all(isinstance(p, DataFrame) for p in got)\n # Check that all partitions have unique keys\n part_unique_keys = set()\n for p in got:\n if len(p):\n # Take rows of the keycolums and build a set of the key-values\n unique_keys = set(map(tuple, p.as_matrix(columns=keycols)))\n # Ensure that none of the key-values have occurred in other groups\n assert not (unique_keys & part_unique_keys)\n part_unique_keys |= unique_keys\n assert len(part_unique_keys)\n\n\[email protected]('nrows', [3, 10, 50])\ndef test_dataframe_hash_partition_masked_value(nrows):\n gdf = DataFrame()\n gdf['key'] = np.arange(nrows)\n gdf['val'] = np.arange(nrows) + 100\n bitmask = utils.random_bitmask(nrows)\n bytemask = 
utils.expand_bits_to_bytes(bitmask)\n gdf['val'] = gdf['val'].set_mask(bitmask)\n parted = gdf.partition_by_hash(['key'], nparts=3)\n # Verify that the valid mask is correct\n for p in parted:\n df = p.to_pandas()\n for row in df.itertuples():\n valid = bool(bytemask[row.key])\n expected_value = row.key + 100 if valid else -1\n got_value = row.val\n assert expected_value == got_value\n\n\[email protected]('nrows', [3, 10, 50])\ndef test_dataframe_hash_partition_masked_keys(nrows):\n gdf = DataFrame()\n gdf['key'] = np.arange(nrows)\n gdf['val'] = np.arange(nrows) + 100\n bitmask = utils.random_bitmask(nrows)\n bytemask = utils.expand_bits_to_bytes(bitmask)\n gdf['key'] = gdf['key'].set_mask(bitmask)\n parted = gdf.partition_by_hash(['key'], nparts=3)\n # Verify that the valid mask is correct\n for p in parted:\n df = p.to_pandas()\n for row in df.itertuples():\n valid = bool(bytemask[row.val - 100])\n # val is key + 100\n expected_value = row.val - 100 if valid else -1\n got_value = row.key\n assert expected_value == got_value\n\n\ndef test_dataframe_empty_concat():\n gdf1 = DataFrame()\n gdf1['a'] = []\n gdf1['b'] = []\n\n gdf2 = gdf1.copy()\n\n gdf3 = gd.concat([gdf1, gdf2])\n assert len(gdf3) == 0\n assert len(gdf3.columns) == 2\n\n\[email protected]('nrows', [0, 3, 10, 100, 1000])\ndef test_nonmatching_index_setitem(nrows):\n np.random.seed(0)\n\n gdf = DataFrame()\n gdf['a'] = np.random.randint(2147483647, size=nrows)\n gdf['b'] = np.random.randint(2147483647, size=nrows)\n gdf = gdf.set_index('b')\n\n test_values = np.random.randint(2147483647, size=nrows)\n gdf['c'] = test_values\n assert(len(test_values) == len(gdf['c']))\n assert(gdf['c'].to_pandas().equals(\n Series(test_values).set_index(gdf._index).to_pandas()))\n\n\[email protected]('nelem', [0, 1, 5, 20, 100])\[email protected]('slice_start', [None, 0, 1, 3, 10])\[email protected]('slice_end', [None, 0, 1, 30, 50, -1])\ndef test_dataframe_masked_slicing(nelem, slice_start, slice_end):\n gdf = 
DataFrame()\n gdf['a'] = list(range(nelem))\n gdf['b'] = list(range(nelem, 2 * nelem))\n gdf['a'] = gdf['a'].set_mask(utils.random_bitmask(nelem))\n gdf['b'] = gdf['b'].set_mask(utils.random_bitmask(nelem))\n\n def do_slice(x):\n return x[slice_start: slice_end]\n\n expect = do_slice(gdf.to_pandas())\n got = do_slice(gdf).to_pandas()\n\n pd.testing.assert_frame_equal(expect, got)\n\n\ndef test_from_pandas():\n df = pd.DataFrame({'x': [1, 2, 3]}, index=[4., 5., 6.])\n gdf = gd.DataFrame.from_pandas(df)\n assert isinstance(gdf, gd.DataFrame)\n\n pd.testing.assert_frame_equal(df, gdf.to_pandas())\n\n s = df.x\n gs = gd.Series.from_pandas(s)\n assert isinstance(gs, gd.Series)\n\n pd.testing.assert_series_equal(s, gs.to_pandas())\n\n\[email protected](reason=\"constructor does not coerce index inputs\")\ndef test_index_in_dataframe_constructor():\n a = pd.DataFrame({'x': [1, 2, 3]}, index=[4., 5., 6.])\n b = gd.DataFrame({'x': [1, 2, 3]}, index=[4., 5., 6.])\n\n pd.testing.assert_frame_equal(a, b.to_pandas())\n assert pd.testing.assert_frame_equal(a.loc[4:], b.loc[4:].to_pandas())\n\n\[email protected]('nelem', [0, 2, 3, 100, 1000])\[email protected](\n 'data_type',\n ['bool', 'int8', 'int16', 'int32', 'int64',\n 'float32', 'float64', 'datetime64[ms]']\n)\ndef test_from_arrow(nelem, data_type):\n df = pd.DataFrame(\n {\n 'a': np.random.randint(0, 1000, nelem).astype(data_type),\n 'b': np.random.randint(0, 1000, nelem).astype(data_type)\n }\n )\n padf = pa.Table.from_pandas(df, preserve_index=False)\\\n .replace_schema_metadata(None)\n gdf = gd.DataFrame.from_arrow(padf)\n assert isinstance(gdf, gd.DataFrame)\n\n pd.testing.assert_frame_equal(df, gdf.to_pandas())\n\n s = pa.Array.from_pandas(df.a)\n gs = gd.Series.from_arrow(s)\n assert isinstance(gs, gd.Series)\n\n # For some reason PyArrow to_pandas() converts to numpy array and has\n # better type compatibility\n np.testing.assert_array_equal(s.to_pandas(), gs.to_array())\n\n\[email protected]('nelem', [0, 2, 3, 100, 
1000])\[email protected](\n 'data_type',\n ['bool', 'int8', 'int16', 'int32', 'int64',\n 'float32', 'float64', 'datetime64[ms]']\n)\ndef test_to_arrow(nelem, data_type):\n df = pd.DataFrame(\n {\n 'a': np.random.randint(0, 1000, nelem).astype(data_type),\n 'b': np.random.randint(0, 1000, nelem).astype(data_type)\n }\n )\n gdf = gd.DataFrame.from_pandas(df)\n\n pa_df = pa.Table.from_pandas(df, preserve_index=False)\\\n .replace_schema_metadata(None)\n # Pandas uses ns so need to cast columns to ms\n if data_type == 'datetime64[ms]':\n pa_df = pa_df.add_column(\n 0,\n pa_df.column(1)\n .cast(pa.timestamp('ms'))\n .cast(pa.int64())\n .cast(pa.date64())\n ).add_column(\n 0,\n pa_df.column(0)\n .cast(pa.timestamp('ms'))\n .cast(pa.int64())\n .cast(pa.date64())\n ).remove_column(2).remove_column(2)\n pa_gdf = gdf.to_arrow(index=False)\n\n assert isinstance(pa_gdf, pa.Table)\n assert pa.Table.equals(pa_df, pa_gdf)\n\n pa_s = pa.Array.from_pandas(df.a)\n # Pandas uses ns so need to cast columns to ms\n if data_type == 'datetime64[ms]':\n pa_s = pa_s.cast(pa.timestamp('ms')).cast(pa.int64()).cast(pa.date64())\n pa_gs = gdf['a'].to_arrow()\n\n assert isinstance(pa_gs, pa.Array)\n assert pa.Array.equals(pa_s, pa_gs)\n\n pa_i = pa.Array.from_pandas(df.index)\n pa_gi = gdf.index.to_arrow()\n\n assert isinstance(pa_gi, pa.Array)\n assert pa.Array.equals(pa_i, pa_gi)\n\n\[email protected](\n 'data_type',\n ['bool', 'int8', 'int16', 'int32', 'int64',\n 'float32', 'float64', 'datetime64[ms]']\n)\ndef test_to_from_arrow_nulls(data_type):\n if data_type == 'datetime64[ms]':\n data_type = pa.date64()\n if data_type == 'bool':\n s1 = pa.array([True, None, False, None, True], type=data_type)\n else:\n s1 = pa.array([1, None, 3, None, 5], type=data_type)\n gs1 = gd.Series.from_arrow(s1)\n assert isinstance(gs1, gd.Series)\n np.testing.assert_array_equal(\n np.array(s1.buffers()[0]),\n gs1.nullmask.to_array()\n )\n assert pa.Array.equals(s1, gs1.to_arrow())\n\n s2 = pa.array([None, None, 
None, None, None], type=data_type)\n gs2 = gd.Series.from_arrow(s2)\n assert isinstance(gs2, gd.Series)\n np.testing.assert_array_equal(\n np.array(s2.buffers()[0]),\n gs2.nullmask.to_array()\n )\n assert pa.Array.equals(s2, gs2.to_arrow())\n\n\ndef test_to_arrow_categorical():\n df = pd.DataFrame()\n df['a'] = pd.Series(['a', 'b', 'c'], dtype=\"category\")\n gdf = gd.DataFrame.from_pandas(df)\n\n pa_df = pa.Table.from_pandas(df, preserve_index=False)\\\n .replace_schema_metadata(None)\n pa_gdf = gdf.to_arrow(index=False)\n\n assert isinstance(pa_gdf, pa.Table)\n assert pa.Table.equals(pa_df, pa_gdf)\n\n pa_s = pa.Array.from_pandas(df.a)\n pa_gs = gdf['a'].to_arrow()\n\n assert isinstance(pa_gs, pa.Array)\n assert pa.Array.equals(pa_s, pa_gs)\n\n\ndef test_from_arrow_missing_categorical():\n pd_cat = pd.Categorical(['a', 'b', 'c'], categories=['a', 'b'])\n pa_cat = pa.array(pd_cat, from_pandas=True)\n gd_cat = gd.Series(pa_cat)\n\n assert isinstance(gd_cat, gd.Series)\n pd.testing.assert_series_equal(\n pd.Series(pa_cat.to_pandas()), # PyArrow returns a pd.Categorical\n gd_cat.to_pandas()\n )\n\n\[email protected](\n raises=NotImplementedError,\n reason=\"PyArrow does not yet support validity masks in creating \"\n \"DictionaryArray objects\"\n)\ndef test_to_arrow_missing_categorical():\n pd_cat = pd.Categorical(['a', 'b', 'c'], categories=['a', 'b'])\n pa_cat = pa.array(pd_cat, from_pandas=True)\n gd_cat = gd.Series(pa_cat)\n\n assert isinstance(gd_cat, gd.Series)\n assert pa.Array.equals(pa_cat, gd_cat.to_arrow())\n\n\[email protected](\n 'data_type',\n ['int8', 'int16', 'int32', 'int64', 'float32', 'float64', 'datetime64[ms]']\n)\ndef test_from_scalar_typing(data_type):\n if data_type == 'datetime64[ms]':\n scalar = np.dtype('int64').type(np.random.randint(0, 5))\\\n .astype('datetime64[ms]')\n else:\n scalar = np.dtype(data_type).type(np.random.randint(0, 5))\n\n gdf = gd.DataFrame()\n gdf['a'] = [1, 2, 3, 4, 5]\n gdf['b'] = scalar\n assert(gdf['b'].dtype == 
np.dtype(data_type))\n assert(len(gdf['b']) == len(gdf['a']))\n",
"# Copyright (c) 2018, NVIDIA CORPORATION.\n\nfrom __future__ import print_function, division\n\nimport pandas as pd\nimport numpy as np\nimport pickle\n\nfrom librmm_cffi import librmm as rmm\n\nfrom . import columnops\nfrom cudf.utils import cudautils, utils\nfrom .buffer import Buffer\nfrom .numerical import NumericalColumn\nfrom .column import Column\nfrom .datetime import DatetimeColumn\nfrom cudf.comm.serialize import register_distributed_serializer\n\n\nclass Index(object):\n def serialize(self, serialize):\n header = {}\n header['payload'], frames = serialize(pickle.dumps(self))\n header['frame_count'] = len(frames)\n return header, frames\n\n @classmethod\n def deserialize(cls, deserialize, header, frames):\n payload = deserialize(header['payload'],\n frames[:header['frame_count']])\n return pickle.loads(payload)\n\n def take(self, indices):\n assert indices.dtype.kind in 'iu'\n if indices.size == 0:\n # Empty indices\n return RangeIndex(indices.size)\n else:\n # Gather\n index = cudautils.gather(data=self.gpu_values, index=indices)\n col = self.as_column().replace(data=Buffer(index))\n return GenericIndex(col)\n\n def argsort(self, ascending=True):\n return self.as_column().argsort(ascending=ascending)\n\n @property\n def values(self):\n return np.asarray([i for i in self.as_column()])\n\n def to_pandas(self):\n return pd.Index(self.as_column().to_pandas())\n\n def to_arrow(self):\n return self.as_column().to_arrow()\n\n @property\n def gpu_values(self):\n return self.as_column().to_gpu_array()\n\n def min(self):\n return self.as_column().min()\n\n def max(self):\n return self.as_column().max()\n\n def sum(self):\n return self.as_column().sum()\n\n def find_segments(self):\n \"\"\"Return the beginning index for segments\n\n Returns\n -------\n result : NumericalColumn\n \"\"\"\n segments, _ = self._find_segments()\n return segments\n\n def _find_segments(self):\n seg, markers = cudautils.find_segments(self.gpu_values)\n return 
NumericalColumn(data=Buffer(seg), dtype=seg.dtype), markers\n\n @classmethod\n def _concat(cls, objs):\n data = Column._concat([o.as_column() for o in objs])\n # TODO: add ability to concatenate indices without always casting to\n # `GenericIndex`\n return GenericIndex(data)\n\n def __eq__(self, other):\n if not isinstance(other, Index):\n return NotImplemented\n elif len(self) != len(other):\n return False\n\n lhs = self.as_column()\n rhs = other.as_column()\n res = lhs.unordered_compare('eq', rhs).all()\n return res\n\n def join(self, other, method, how='left', return_indexers=False):\n column_join_res = self.as_column().join(\n other.as_column(), how=how, return_indexers=return_indexers,\n method=method)\n if return_indexers:\n joined_col, indexers = column_join_res\n joined_index = GenericIndex(joined_col)\n return joined_index, indexers\n else:\n return column_join_res\n\n\nclass RangeIndex(Index):\n \"\"\"Basic start..stop\n \"\"\"\n def __init__(self, start, stop=None, name=None):\n \"\"\"RangeIndex(size), RangeIndex(start, stop)\n\n Parameters\n ----------\n size, start, stop: int\n \"\"\"\n if stop is None:\n start, stop = 0, start\n self._start = int(start)\n self._stop = int(stop)\n self.name = name\n\n def __repr__(self):\n return \"{}(start={}, stop={})\".format(self.__class__.__name__,\n self._start, self._stop)\n\n def __len__(self):\n return max(0, self._stop - self._start)\n\n def __getitem__(self, index):\n if isinstance(index, slice):\n start, stop = utils.normalize_slice(index, len(self))\n start += self._start\n stop += self._start\n if index.step is None:\n return RangeIndex(start, stop)\n else:\n return index_from_range(start, stop, index.step)\n elif isinstance(index, int):\n index = utils.normalize_index(index, len(self))\n index += self._start\n return index\n else:\n raise ValueError(index)\n\n def __eq__(self, other):\n if isinstance(other, RangeIndex):\n return (self._start == other._start and self._stop == other._stop)\n else:\n return 
super(RangeIndex, self).__eq__(other)\n\n @property\n def dtype(self):\n return np.dtype(np.int64)\n\n def find_label_range(self, first, last):\n # clip first to range\n if first is None or first < self._start:\n begin = self._start\n elif first < self._stop:\n begin = first\n else:\n begin = self._stop\n # clip last to range\n if last is None:\n end = self._stop\n elif last < self._start:\n end = begin\n elif last < self._stop:\n end = last + 1\n else:\n end = self._stop\n # shift to index\n return begin - self._start, end - self._start\n\n def as_column(self):\n if len(self) > 0:\n vals = cudautils.arange(self._start, self._stop, dtype=self.dtype)\n else:\n vals = rmm.device_array(0, dtype=self.dtype)\n return NumericalColumn(data=Buffer(vals), dtype=vals.dtype)\n\n def to_pandas(self):\n return pd.RangeIndex(start=self._start, stop=self._stop,\n dtype=self.dtype)\n\n\ndef index_from_range(start, stop=None, step=None):\n vals = cudautils.arange(start, stop, step, dtype=np.int64)\n return GenericIndex(NumericalColumn(data=Buffer(vals), dtype=vals.dtype))\n\n\nclass GenericIndex(Index):\n def __new__(self, values, name=None):\n from .series import Series\n\n # normalize the input\n if isinstance(values, Series):\n values = values._column\n elif isinstance(values, columnops.TypedColumnBase):\n values = values\n else:\n values = NumericalColumn(data=Buffer(values), dtype=values.dtype)\n\n assert isinstance(values, columnops.TypedColumnBase), type(values)\n assert values.null_count == 0\n\n # Make GenericIndex object\n res = Index.__new__(GenericIndex)\n res._values = values\n res.name = name\n return res\n\n def serialize(self, serialize):\n header = {}\n header['payload'], frames = serialize(self._values)\n header['frame_count'] = len(frames)\n return header, frames\n\n @classmethod\n def deserialize(cls, deserialize, header, frames):\n payload = deserialize(header['payload'],\n frames[:header['frame_count']])\n return cls(payload)\n\n def __sizeof__(self):\n return 
self._values.__sizeof__()\n\n def __reduce__(self):\n return GenericIndex, tuple([self._values])\n\n def __len__(self):\n return len(self._values)\n\n def __repr__(self):\n vals = [self._values[i] for i in range(min(len(self), 10))]\n return \"{}({}, dtype={})\".format(self.__class__.__name__,\n vals, self._values.dtype)\n\n def __getitem__(self, index):\n res = self._values[index]\n if not isinstance(index, int):\n return GenericIndex(res)\n else:\n return res\n\n def as_column(self):\n \"\"\"Convert the index as a Series.\n \"\"\"\n return self._values\n\n @property\n def dtype(self):\n return self._values.dtype\n\n def find_label_range(self, first, last):\n \"\"\"Find range that starts with *first* and ends with *last*,\n inclusively.\n\n Returns\n -------\n begin, end : 2-tuple of int\n The starting index and the ending index.\n The *last* value occurs at ``end - 1`` position.\n \"\"\"\n col = self._values\n begin, end = None, None\n if first is not None:\n begin = col.find_first_value(first)\n if last is not None:\n end = col.find_last_value(last)\n end += 1\n return begin, end\n\n\nregister_distributed_serializer(RangeIndex)\nregister_distributed_serializer(GenericIndex)\n\n\nclass DatetimeIndex(GenericIndex):\n # TODO this constructor should take a timezone or something to be\n # consistent with pandas\n def __new__(self, values, name=None):\n # we should be more strict on what we accept here but\n # we'd have to go and figure out all the semantics around\n # pandas dtindex creation first which. 
For now\n # just make sure we handle np.datetime64 arrays\n # and then just dispatch upstream\n if isinstance(values, np.ndarray) and values.dtype.kind == 'M':\n values = DatetimeColumn.from_numpy(values)\n elif isinstance(values, pd.DatetimeIndex):\n values = DatetimeColumn.from_numpy(values.values)\n # can someone look this over, I never remember how to\n # override __new__ properly\n res = Index.__new__(DatetimeIndex)\n res._values = values\n res.name = name\n return res\n\n @property\n def year(self):\n return self.get_dt_field('year')\n\n @property\n def month(self):\n return self.get_dt_field('month')\n\n @property\n def day(self):\n return self.get_dt_field('day')\n\n @property\n def hour(self):\n return self.get_dt_field('hour')\n\n @property\n def minute(self):\n return self.get_dt_field('minute')\n\n @property\n def second(self):\n return self.get_dt_field('second')\n\n def get_dt_field(self, field):\n out_column = self._values.get_dt_field(field)\n # columnops.column_empty_like always returns a Column object\n # but we need a NumericalColumn for GenericIndex..\n # how should this be handled?\n out_column = NumericalColumn(data=out_column.data,\n mask=out_column.mask,\n null_count=out_column.null_count,\n dtype=out_column.dtype)\n return GenericIndex(out_column)\n"
] |
[
[
"pandas.Series",
"numpy.asarray",
"pandas.DataFrame",
"numpy.dtype",
"numpy.random.randn",
"pandas.testing.assert_frame_equal",
"numpy.random.randint",
"numpy.hstack",
"numpy.testing.assert_equal",
"numpy.arange",
"numpy.zeros",
"pandas.Categorical",
"numpy.array",
"numpy.random.random",
"numpy.random.seed",
"numpy.random.shuffle",
"numpy.ones",
"numpy.testing.assert_array_equal",
"numpy.vstack"
],
[
"pandas.RangeIndex",
"numpy.dtype"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
t20100/sandbox
|
[
"eec6bed5c59142a5febc9e212edadf690c0dea8c"
] |
[
"testPlotWidget.py"
] |
[
"# coding: utf-8\n# /*##########################################################################\n#\n# Copyright (c) 2016 European Synchrotron Radiation Facility\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# ###########################################################################*/\n\"\"\"Interactive test script for silx/PyMca PlotWindow\n\nScript options:\n\n- pymca: To use PyMca PlotWindow (default is using silx)\n- gl: To use OpenGL backend (only with PyMca, default is matplotlib)\n\"\"\"\n\n\n# import ######################################################################\n\nimport logging\nimport sys\nimport time\n\nimport numpy as np\nfrom uuid import uuid4\n\nlogging.basicConfig()\nlogger = logging.getLogger()\n\n# import PySide\n\nif hasattr(sys, \"argv\") and \"pymca\" in sys.argv:\n BACKEND = \"pymca\"\n logger.warning(\"Using PyMca PlotWindow\")\n from PyMca5.PyMcaGui import PyMcaQt as qt\n from PyMca5.PyMcaGui.plotting.PlotWindow 
import PlotWindow\nelse:\n BACKEND = \"silx\"\n logger.warning(\"Using silx PlotWindow\")\n from silx.gui import qt\n from silx.gui.plot import PlotWindow\n\n # logging.getLogger('silx.gui.plot.Plot').setLevel(logging.ERROR)\n\n\n# TestWindow ##################################################################\n\n\nclass TestWindow(PlotWindow):\n _COLORS = \"black\", \"red\", \"green\", \"blue\", None # 'video inverted'\n\n def __init__(self, parent, backend):\n self._colorIndex = 0\n self.__grid = 0\n self.__activeCurve = True\n\n if BACKEND == \"silx\":\n super(TestWindow, self).__init__(\n parent=parent,\n backend=backend,\n control=True,\n position=[\n (\"X\", lambda x, y: x),\n (\"Y\", lambda x, y: y),\n (\"Value\", self._getValue),\n ],\n )\n from silx.gui.plot import PlotTools\n\n self.profileToolBar = PlotTools.ProfileToolBar(plot=self)\n self.addToolBar(self.profileToolBar)\n else:\n super(TestWindow, self).__init__(\n parent=parent,\n backend=backend,\n aspect=True,\n colormap=True,\n control=True,\n position=True,\n roi=True,\n )\n\n self._initMenuBar()\n self.show()\n\n # self.sigPlotSignal.connect(self._plotCallback)\n testImage(self) # testLog(self)\n self.setPanWithArrowKeys(True)\n\n def _getValue(self, x, y):\n image = self.getActiveImage()\n if image is None:\n return \"No image\"\n else:\n data, params = image[0], image[4]\n try:\n row = int((y - params[\"origin\"][1]) / params[\"scale\"][1])\n col = int((x - params[\"origin\"][0]) / params[\"scale\"][0])\n except ValueError:\n return \"-\"\n try:\n value = data[row, col]\n except IndexError:\n return \"-\"\n else:\n return row, col, data[row, col]\n\n def doReplot(self, *args, **kwargs):\n \"\"\"Only calls replot when using PyMca, it is useless with silx.\"\"\"\n if BACKEND == \"pymca\":\n super(TestWindow, self).replot(*args, **kwargs)\n\n def _plotCallback(self, eventDict=None):\n if eventDict[\"event\"] != \"mouseMoved\":\n print(eventDict)\n if eventDict[\"event\"] == \"curveClicked\":\n 
print(\"setActiveCurve\", eventDict[\"label\"])\n self.setActiveCurve(eventDict[\"label\"])\n if eventDict[\"event\"] == \"drawingFinished\":\n shape = eventDict[\"type\"]\n if shape in [\"polygon\", \"rectangle\"]:\n self.addItem(\n xdata=eventDict[\"xdata\"],\n ydata=eventDict[\"ydata\"],\n legend=str(uuid4()),\n info=None,\n replace=False,\n shape=eventDict[\"type\"],\n fill=True,\n color=\"#000000\",\n )\n elif shape in [\"hline\", \"vline\", \"line\"]:\n self.addItem(\n xdata=eventDict[\"xdata\"],\n ydata=eventDict[\"ydata\"],\n legend=str(uuid4()),\n info=None,\n replace=False,\n shape=eventDict[\"type\"],\n fill=False,\n color=\"#0000FF\",\n )\n\n def _initMenuBar(self):\n # Menu VIEW\n menu = self.menuBar().addMenu(\"View\")\n menu.addAction(\"resetZoom()\", self.resetZoom)\n\n def resetDataMargins():\n self.setDataMargins()\n\n menu.addAction(\"setDataMargins()\", resetDataMargins)\n\n def setDataMargins():\n self.setDataMargins(0.1, 0.2, 0.3, 0.4)\n\n menu.addAction(\"setDataMargins(.1, .2, .3, .4)\", setDataMargins)\n\n def resetZoom():\n self.resetZoom((0.4, 0.3, 0.2, 0.1))\n\n menu.addAction(\"resetZoom(0.4, 0.3, 0.2, 0.1)\", resetZoom)\n\n def toggleAutoScaleX():\n self.setXAxisAutoScale(not self.isXAxisAutoScale())\n self.statusBar().showMessage(\n \"Axis autoscale X: %s, Y: %s\"\n % (self.isXAxisAutoScale(), self.isYAxisAutoScale())\n )\n\n menu.addAction(\"Toggle Autoscale X\", toggleAutoScaleX)\n\n def toggleAutoScaleY():\n self.setYAxisAutoScale(not self.isYAxisAutoScale())\n self.statusBar().showMessage(\n \"Axis autoscale X: %s, Y: %s\"\n % (self.isXAxisAutoScale(), self.isYAxisAutoScale())\n )\n\n menu.addAction(\"Toggle Autoscale Y\", toggleAutoScaleY)\n\n def keepAspectRatio():\n self.keepDataAspectRatio(not self.isKeepDataAspectRatio())\n # Ugly workaround Plot not forwarding isKeepDataAspectRatio\n\n menu.addAction(\"Keep aspect ratio\", keepAspectRatio)\n\n def invertYAxis():\n self.invertYAxis(not self.isYAxisInverted())\n 
self.doReplot()\n\n menu.addAction(\"Invert Y axis\", invertYAxis)\n\n def changeGrid():\n self.__grid = (self.__grid + 1) % 4\n self.showGrid(self.__grid)\n self.statusBar().showMessage(\"Grid mode: %d\" % self.__grid)\n\n menu.addAction(\"Change Grid\", changeGrid)\n\n def toggleCursor():\n cursor = self.getGraphCursor()\n if cursor is None:\n self.setGraphCursor(True, color=\"red\", linewidth=1)\n else:\n self.setGraphCursor(False)\n self.statusBar().showMessage(\"Cursor mode: %s\" % str(self.getGraphCursor()))\n\n menu.addAction(\"Toggle Cursor\", toggleCursor)\n\n def toggleLogX():\n self.setXAxisLogarithmic(not self.isXAxisLogarithmic())\n self.doReplot()\n self.statusBar().showMessage(\n \"Log Scale X: %s, Y: %s\"\n % (self.isXAxisLogarithmic(), self.isYAxisLogarithmic())\n )\n\n menu.addAction(\"Toggle Log X\", toggleLogX)\n\n def toggleLogY():\n self.setYAxisLogarithmic(not self.isYAxisLogarithmic())\n self.doReplot()\n self.statusBar().showMessage(\n \"Log Scale X: %s, Y: %s\"\n % (self.isXAxisLogarithmic(), self.isYAxisLogarithmic())\n )\n\n menu.addAction(\"Toggle Log Y\", toggleLogY)\n\n def toggleLogY():\n self.setYAxisLogarithmic(not self.isYAxisLogarithmic())\n self.doReplot()\n self.statusBar().showMessage(\n \"Log Scale X: %s, Y: %s\"\n % (self.isXAxisLogarithmic(), self.isYAxisLogarithmic())\n )\n\n menu.addAction(\"Toggle Log Y\", toggleLogY)\n\n def toggleActiveCurve():\n self.enableActiveCurveHandling(not self.isActiveCurveHandlingEnabled())\n self.doReplot()\n self.statusBar().showMessage(\n \"Active curve handling: %s\" % self.isActiveCurveHandlingEnabled()\n )\n\n menu.addAction(\"Toggle Active Curve Handling\", toggleActiveCurve)\n\n def changeBaseVectors():\n baseVectors = self._plot.getBaseVectors()\n if baseVectors == ((1.0, 0.0), (0.0, 1.0)):\n baseVectors = (1.0, 0.5), (0.3, 1.0)\n else:\n baseVectors = (1.0, 0.0), (0.0, 1.0)\n self.setBaseVectors(*baseVectors)\n self.doReplot()\n\n menu.addAction(\"Change base vectors\", 
changeBaseVectors)\n\n # Menu INTERACTION\n menu = self.menuBar().addMenu(\"Interaction\")\n\n def zoomMode():\n self._colorIndex = (self._colorIndex + 1) % len(self._COLORS)\n color = self._COLORS[self._colorIndex]\n self.statusBar().showMessage(\"Enable zoom, color: %s\" % color)\n self.setZoomModeEnabled(True, color)\n\n menu.addAction(\"Zoom Mode\", zoomMode)\n\n def drawPolygon():\n self.setDrawModeEnabled(True, \"polygon\", label=\"mask\", color=\"red\")\n\n menu.addAction(\"Draw Polygon\", drawPolygon)\n\n def drawRect():\n self.setDrawModeEnabled(True, \"rectangle\", label=\"mask\")\n\n menu.addAction(\"Draw Rectangle\", drawRect)\n\n def drawLine():\n self.setDrawModeEnabled(True, \"line\", label=\"LINE\")\n\n menu.addAction(\"Draw Line\", drawLine)\n\n def drawHLine():\n self.setDrawModeEnabled(True, \"hline\", label=\"HORIZONTAL\")\n\n menu.addAction(\"Draw Horiz. Line\", drawHLine)\n\n def drawVLine():\n self.setDrawModeEnabled(True, \"vline\", label=\"VERTICAL\")\n\n menu.addAction(\"Draw Vert. 
Line\", drawVLine)\n\n def toggleActiveCurve():\n self.__activeCurve = not self.__activeCurve\n self.enableActiveCurveHandling(self.__activeCurve)\n self.statusBar().showMessage(\n \"Active curve handling: %s\" % self.__activeCurve\n )\n\n menu.addAction(\"Toggle active curve\", toggleActiveCurve)\n\n def panMode():\n if hasattr(self, \"setInteractiveMode\"): # silx\n self.setInteractiveMode(\"pan\")\n else: # matplotlib OpenGL backend\n self._plot.setInteractiveMode(\"pan\")\n\n action = menu.addAction(\"Pan Mode\", panMode)\n if not hasattr(self, \"setInteractiveMode\") and not hasattr(\n self._plot, \"setInteractiveMode\"\n ):\n action.setEnabled(False)\n\n # Menu DATA\n menu = self.menuBar().addMenu(\"Data\")\n\n def clear():\n self.resetTimer()\n self.clear()\n\n menu.addAction(\"Clear\", self.clear)\n\n def saveAsSvg():\n filename = \"testSaveGraph.svg\"\n self.saveGraph(filename, \"svg\")\n self.statusBar().showMessage(\"Saved as %s\" % filename)\n\n menu.addAction(\"Save as svg\", saveAsSvg)\n\n def toggleRightAxis():\n curve = self.getCurve(\"rightTest\")\n if curve is None:\n data = np.arange(1000.0, dtype=np.float32)\n self.addCurve(\n data,\n np.sqrt(data),\n legend=\"rightTest\",\n replace=False,\n replot=True,\n z=5,\n color=\"black\",\n linestyle=\"-\",\n selectable=True,\n xlabel=\"Right X\",\n ylabel=\"Right Y\",\n yaxis=\"right\",\n )\n else:\n self.removeCurve(\"rightTest\")\n\n menu.addAction(\"Right axis data\", toggleRightAxis)\n\n def setUInt16Data():\n dataUInt16 = np.arange(1024 * 1024, dtype=np.uint16)\n dataUInt16.shape = 1024, -1\n\n colormap2 = {\n \"name\": \"temperature\",\n \"normalization\": \"linear\",\n \"autoscale\": False,\n \"vmin\": 1.0,\n \"vmax\": dataUInt16.max(),\n \"colors\": 256,\n }\n self.addImage(\n dataUInt16,\n legend=\"image 2\",\n xScale=(0, 1.0),\n yScale=(100.0, 1.0),\n replace=False,\n replot=True,\n colormap=colormap2,\n alpha=0.5,\n )\n\n menu.addAction(\"DataSet uint16 1\", setUInt16Data)\n\n def 
setUInt16Data2():\n dataUInt16 = np.arange(1024 * 1024, dtype=np.uint16) + 10000\n dataUInt16.shape = 1024, -1\n\n colormap2 = {\n \"name\": \"temperature\",\n \"normalization\": \"linear\",\n \"autoscale\": False,\n \"vmin\": 1.0,\n \"vmax\": dataUInt16.max(),\n \"colors\": 256,\n }\n self.addImage(\n dataUInt16,\n legend=\"image 2\",\n xScale=(0, 1.0),\n yScale=(0.0, 1.0),\n replace=False,\n replot=True,\n colormap=colormap2,\n )\n\n menu.addAction(\"DataSet uint16 2\", setUInt16Data2)\n\n def testEverythingAction():\n self.resetTimer()\n self.clear()\n self.resetZoom()\n testEverything(self)\n\n menu.addAction(\"Test everything\", testEverythingAction)\n\n def testLogAction():\n self.resetTimer()\n self.clear()\n self.resetZoom()\n testLog(self)\n\n menu.addAction(\"Test Log\", testLogAction)\n\n def testErrorBarsAction():\n self.resetTimer()\n self.clear()\n self.resetZoom()\n testErrorBars(self)\n\n menu.addAction(\"Test Error Bars\", testErrorBarsAction)\n\n def testReversedImagesAction():\n self.resetTimer()\n self.clear()\n self.resetZoom()\n testReversedImages(self)\n\n menu.addAction(\"Test Reversed Images\", testReversedImagesAction)\n\n def testMarkersAction():\n self.resetTimer()\n self.clear()\n self.resetZoom()\n testMarkers(self)\n\n menu.addAction(\"Test Markers\", testMarkersAction)\n\n def testScatterAction():\n self.resetTimer()\n self.clear()\n self.resetZoom()\n testScatter(self)\n\n menu.addAction(\"Test Scatter\", testScatterAction)\n\n def testImageAction():\n self.resetTimer()\n self.clear()\n self.resetZoom()\n testImage(self)\n # self.menuBar().hide()\n\n menu.addAction(\"Test Image\", testImageAction)\n\n def testStreamingAction():\n self.resetTimer()\n self.clear()\n self.streaming()\n self.resetZoom()\n self.useTimer(self.streaming, 100)\n\n menu.addAction(\"Test Streaming\", testStreamingAction)\n\n def streaming(self):\n data = np.asarray(np.random.random(512 * 512), dtype=np.float32)\n data.shape = 512, 512\n # resetzoom=False to 
avoid resetZoom\n self.addImage(data, replace=False, replot=False)\n self.doReplot()\n\n def useTimer(self, callback, timeoutMS):\n self.timer = qt.QTimer()\n self.timer.timeout.connect(callback)\n self.timer.start(timeoutMS)\n\n def resetTimer(self):\n if hasattr(self, \"timer\"):\n self.timer.stop()\n del self.timer\n\n def keyPressEvent(self, event):\n \"\"\"Forward key events to plot widget...\n\n Find a better way to do it\n \"\"\"\n super(TestWindow, self).keyPressEvent(event)\n\n\n# test ########################################################################\n\n# Testing Latin-1 characters with python 2.x\n# title = u'Title !#$%&\\'()*+,-./¡¢£¤¥¦§¨©ª«¬-®¯'\n# xLabel = u'Rows ÐÑÒÓÔÕÖרÙÚÛÜuÝÞß'\n# yLabel = u'Columns ðñòóôõö÷øùúûüýþÿ'\n\n# Testing Latin-1 characters with python 3.x\n# title = 'Title !#$%&\\'()*+,-./¡¢£¤¥¦§¨©ª«¬-®¯'\n# xLabel = 'Rows ÐÑÒÓÔÕÖרÙÚÛÜuÝÞß'\n# yLabel = 'Columns ðñòóôõö÷øùúûüýþÿ'\n\ntitle = \"Title\"\nxLabel = \"Rows\"\nyLabel = \"Columns\"\n\n\ndef testEverything(w):\n \"\"\"Dummy test of many stuff.\"\"\"\n w.setXAxisLogarithmic(False)\n w.setYAxisLogarithmic(False)\n\n w.setGraphTitle(title)\n w.setGraphXLabel(xLabel)\n w.setGraphYLabel(yLabel)\n\n norm = \"log\"\n\n size = 1024\n data = np.arange(float(size) * size, dtype=np.float32)\n data.shape = size, size\n\n dataUInt16 = np.array(data, dtype=np.uint16)\n dataUInt8 = np.array(data, dtype=np.uint8)\n\n w.addItem(\n xdata=np.array((0, 0, 200, 200)),\n ydata=np.array((0, 200, 200, 0)),\n legend=\"test\",\n info=None,\n replace=False,\n shape=\"polygon\",\n fill=True,\n color=\"blue\",\n )\n w.addItem(\n xdata=np.array((200, 200, 400, 400)),\n ydata=np.array((200, 400, 400, 200)),\n legend=\"test2\",\n info=None,\n replace=False,\n shape=\"polygon\",\n fill=False,\n color=\"green\",\n )\n w.addItem(\n xdata=np.array((1300, 1600, 1900, 1300, 1900)),\n ydata=np.array((-700, -200, -700, -300, -300)),\n legend=\"star\",\n info=None,\n replace=False,\n shape=\"polygon\",\n fill=True,\n 
color=\"#000000\",\n )\n\n colormap = {\n \"name\": \"temperature\",\n \"normalization\": \"linear\",\n \"autoscale\": True,\n \"vmin\": 0.0,\n \"vmax\": 1.0,\n \"colors\": 256,\n }\n w.addImage(\n data,\n legend=\"image 1\",\n xScale=(25, 1.0),\n yScale=(-size, 1.0),\n replot=False,\n colormap=colormap,\n z=2,\n )\n\n colormap2 = {\n \"name\": \"temperature\",\n \"normalization\": norm,\n \"autoscale\": False,\n \"vmin\": 1.0,\n \"vmax\": dataUInt16.max(),\n \"colors\": 256,\n }\n w.addImage(\n dataUInt16,\n legend=\"image 2\",\n xScale=(0, 1.0),\n yScale=(0.0, 1.0),\n replace=False,\n replot=False,\n colormap=colormap2,\n )\n\n colormap3 = {\n \"name\": \"red\",\n \"normalization\": \"linear\",\n \"autoscale\": True,\n \"vmin\": 0.0,\n \"vmax\": 1.0,\n \"colors\": 256,\n }\n w.addImage(\n dataUInt8,\n legend=\"image 3\",\n xScale=(size, 1.0),\n yScale=(-size / 2, 1.0),\n replace=False,\n replot=False,\n colormap=colormap3,\n )\n\n rgbData = np.array(\n (\n ((0, 0, 0), (128, 0, 0), (255, 0, 0)),\n ((0, 128, 0), (0, 128, 128), (0, 128, 256)),\n ),\n dtype=np.uint8,\n )\n w.addImage(\n rgbData,\n legend=\"rgb\",\n xScale=(-30, 10),\n yScale=(-20, 10),\n replace=False,\n replot=False,\n )\n\n rgbaData = np.array(\n (\n ((0, 0, 0, 0.5), (0.5, 0, 0, 1), (1, 0, 0, 0.5)),\n ((0, 0.5, 0, 1), (0, 0.5, 0.5, 1), (0, 1, 1, 0.5)),\n ),\n dtype=np.float32,\n )\n w.addImage(\n rgbaData,\n legend=\"rgba\",\n xScale=(-5, 10),\n yScale=(200, 10),\n replace=False,\n replot=False,\n )\n\n size = 2000\n data2 = np.arange(float(size) * 3, dtype=np.dtype(np.float32))\n data2.shape = 3, size\n w.addImage(\n data2,\n legend=\"image 4\",\n xScale=(100, 1.0),\n yScale=(-200.0, 200.0),\n replace=False,\n replot=False,\n colormap=colormap3,\n selectable=True,\n draggable=False,\n )\n\n # curves\n xData = np.arange(1000)\n yData = np.random.random(1000)\n colorData = np.array(np.random.random(3 * 1000), dtype=np.float32)\n colorData.shape = 1000, 3\n\n w.addCurve(\n xData,\n -50 + 10 * 
np.sin(xData),\n legend=\"curve 1\",\n replace=False,\n replot=False,\n linestyle=\"\",\n symbol=\"s\",\n z=3,\n xlabel=\"Curve 1 X\",\n ylabel=\"Curve 1 Y\",\n ) # , fill=True)\n w.addCurve(\n xData + 1000,\n xData - 1000 + 200 * yData,\n legend=\"curve 2\",\n replace=False,\n replot=False,\n color=\"green\", # color=colorData,\n linestyle=\"-\",\n symbol=\"o\",\n selectable=True,\n xlabel=\"Curve 2 X\",\n ylabel=\"Curve 2 Y\",\n )\n\n # markers\n w.insertXMarker(1000, \"testX\", None, color=\"pink\", selectable=False, draggable=True)\n w.insertYMarker(\n -600, \"testY\", \"markerY\", color=\"black\", selectable=False, draggable=True\n )\n\n w.insertMarker(\n 1000,\n 500,\n \"constraintV\",\n \"constraint Vert\",\n color=\"black\",\n selectable=False,\n draggable=True,\n symbol=\"o\",\n constraint=\"v\",\n )\n w.insertMarker(\n 1000,\n 600,\n \"constraintH\",\n \"constraint Horiz\",\n color=\"blue\",\n selectable=False,\n draggable=True,\n symbol=\"d\",\n constraint=\"h\",\n )\n\n def squareConstraint(x, y):\n return min(1500, max(x, 900)), min(800, max(y, 200))\n\n w.insertMarker(\n 1000,\n 700,\n \"constraint\",\n \"constraint\",\n color=\"red\",\n selectable=False,\n draggable=True,\n symbol=\"+\",\n constraint=squareConstraint,\n )\n\n w.insertMarker(\n -100,\n 500,\n \"testS\",\n \"markerSelect\",\n color=\"black\",\n selectable=True,\n draggable=False,\n )\n w.insertXMarker(\n 500, \"test\", \"marker\", color=\"black\", selectable=False, draggable=False\n )\n\n w.resetZoom()\n\n\n# test ########################################################################\n\n\ndef testMarkers(w):\n # markers\n w.insertXMarker(1000, \"testX\", None, color=\"pink\", selectable=False, draggable=True)\n w.insertYMarker(\n 600, \"testY\", \"markerY\", color=\"black\", selectable=False, draggable=True\n )\n\n w.insertMarker(\n 1000,\n 500,\n \"constraintV\",\n \"constraint Vert\",\n color=\"black\",\n selectable=False,\n draggable=True,\n symbol=\"o\",\n constraint=\"v\",\n )\n 
w.insertMarker(\n 1000,\n 600,\n \"constraintH\",\n \"constraint Horiz\",\n color=\"blue\",\n selectable=False,\n draggable=True,\n symbol=\"d\",\n constraint=\"h\",\n )\n\n def squareConstraint(x, y):\n return min(1500, max(x, 900)), min(800, max(y, 200))\n\n w.insertMarker(\n 1000,\n 700,\n \"constraint\",\n \"constraint\",\n color=\"red\",\n selectable=False,\n draggable=True,\n symbol=\"*\",\n constraint=squareConstraint,\n )\n\n w.insertMarker(\n 100,\n 500,\n \"testS\",\n \"markerSelect\",\n color=\"black\",\n selectable=True,\n draggable=False,\n )\n w.insertXMarker(\n 500, \"test\", \"marker\", color=\"black\", selectable=False, draggable=False\n )\n\n # Add one curve\n data = np.array((1.0, 2000.0))\n w.addCurve(x=data, y=data)\n\n w.resetZoom()\n\n\n# test ########################################################################\n\n\ndef testLog(w):\n w.keepDataAspectRatio(False)\n w.setXAxisLogarithmic(True)\n w.setYAxisLogarithmic(True)\n\n # Items\n w.addItem(\n np.array((200, 200, 400, 400)),\n np.array((200, 40000, 40000, 200)),\n legend=\"test2\",\n info=None,\n replace=False,\n shape=\"polygon\",\n color=\"green\",\n ) # , fill=False\n\n # Image\n # size = 1024\n # data = np.arange(float(size)*size, dtype=np.float32)\n # data.shape = size,size\n # colormap = {'name': 'gray', 'normalization':'linear',\n # 'autoscale':True, 'vmin':0.0, 'vmax':1.0,\n # 'colors':256}\n # w.addImage(data, legend=\"image 1\",\n # xScale=(1.0, 1.0) , yScale=(1.0, 1.0),\n # replot=False, colormap=colormap)\n\n # curves\n xData = np.arange(1000.0) + 1.0\n\n # print('add curve right')\n # w.addCurve(xData, 1./xData ** 8, legend=\"curve right\",\n # #color='#FF000080',\n # replace=False, replot=False, linestyle=\"-\", symbol=\"o\",\n # xlabel=\"curve Right X\", ylabel=\"curve Right Y\",\n # #selectable=True,\n # yaxis=\"right\") #fill=True)\n # w.setActiveCurve(\"curve right\", False)\n\n w.addCurve(\n xData,\n xData ** 8,\n legend=\"curve 2\",\n z=2,\n # color='#0000FF80',\n 
replace=False,\n replot=False,\n linestyle=\"-\",\n symbol=\"o\",\n xlabel=\"curve 2 X\",\n ylabel=\"curve 2 Y\",\n linewidth=3,\n # selectable=True,\n yaxis=\"left\",\n ) # fill=True)\n\n w.addCurve(\n xData,\n (xData - 100.0) ** 7,\n legend=\"curve minus\",\n z=1,\n # color='#0000FF80',\n replace=False,\n replot=False,\n linestyle=\"--\",\n symbol=\"o\",\n xlabel=\"curve Minus X\",\n ylabel=\"curve Minus Y\",\n # selectable=True,\n yaxis=\"left\",\n )\n\n w.addCurve(\n xData,\n xData ** 7,\n legend=\"curve 1\",\n z=1,\n # color='#0000FF80',\n replace=False,\n replot=False,\n linestyle=\":\",\n symbol=\"o\",\n xlabel=\"curve Minus X\",\n ylabel=\"curve Minus Y\",\n # selectable=True,\n yaxis=\"left\",\n )\n # markers\n # w.insertXMarker(1000, 'testX', 'markerX', color='pink',\n # selectable=False, draggable=True)\n # w.insertYMarker(1000, 'testY', 'markerY', color='black',\n # selectable=False, draggable=True)\n # w.insertMarker(1000, 500, 'testXY', 'markerPt', color='black',\n # selectable=False, draggable=True)\n\n w.resetZoom()\n\n\n# test ########################################################################\n\n\ndef testErrorBars(w):\n w.enableActiveCurveHandling(False)\n w.keepDataAspectRatio(False)\n w.setXAxisLogarithmic(False)\n w.setYAxisLogarithmic(False)\n\n # curves\n xData = np.arange(100.0) + 1.0\n yData = xData\n xError = (np.arange(100.0), 100.0 - np.arange(100.0))\n yError = np.ones((100,)) * 0.5\n w.addCurve(\n xData,\n yData,\n legend=\"curve error bars\",\n color=\"red\",\n replace=False,\n replot=False,\n linestyle=\"-\",\n symbol=\"o\",\n xlabel=\"X\",\n ylabel=\"Y\",\n xerror=xError,\n yerror=yError,\n # selectable=True,\n yaxis=\"left\",\n ) # fill=True)\n\n size = 100\n x = np.random.random(size) * size\n y = np.random.random(size) * size\n color = np.random.random(size * 3).reshape(size, -1)\n\n w.addCurve(\n x,\n y,\n legend=\"scatter\",\n color=color,\n symbol=\"o\",\n linestyle=\" \",\n xerror=1.0,\n yerror=2.0,\n )\n\n 
w.resetZoom()\n\n\n# test ########################################################################\n\n\ndef testReversedImages(w):\n \"\"\"Dummy reversed image [x|y]Scale.\"\"\"\n w.setXAxisLogarithmic(False)\n w.setYAxisLogarithmic(False)\n\n w.setGraphTitle(title)\n w.setGraphXLabel(xLabel)\n w.setGraphYLabel(yLabel)\n\n size = 1024\n data = np.arange(float(size) * size, dtype=np.float32)\n data.shape = size, size\n trans = np.array(data.T, copy=True)\n\n w.addImage(\n data,\n legend=\"image1\",\n xScale=(1025.0, -1.0),\n yScale=(513.0, -0.5),\n replace=False,\n replot=False,\n draggable=True,\n )\n w.addImage(\n trans,\n legend=\"image2\",\n xScale=(1025.0, -1.0),\n yScale=(1025.0, -0.5),\n replace=False,\n replot=False,\n )\n\n # Workaround matplotlib inverting X axis with image with xScale < 0\n # w.setGraphXLimits(*w.getGraphXLimits())\n w.resetZoom()\n\n\n# test ########################################################################\n\n\ndef testScatter(w):\n \"\"\"Scatter plot.\"\"\"\n w.setXAxisLogarithmic(False)\n w.setYAxisLogarithmic(False)\n\n w.setGraphTitle(\"Test Scatter\")\n w.enableActiveCurveHandling(False)\n\n size = 512\n x = np.arange(size, dtype=np.float32)\n y = np.random.random(size)\n color = np.random.random(size * 3).reshape(size, -1)\n\n w.addCurve(x, y, legend=\"scatter\", color=color, symbol=\"o\", linestyle=\" \")\n\n x = np.arange(size, dtype=np.float32)\n y = np.random.random(size) + 1.0\n color = np.random.random(size * 3).reshape(size, -1)\n\n w.addCurve(x, y, legend=\"scatter 2\", color=color, symbol=\"o\", linestyle=\"-\")\n\n w.resetZoom()\n\n\n# test ########################################################################\n\n\ndef testImage(w):\n \"\"\"Small image.\"\"\"\n w.setXAxisLogarithmic(False)\n w.setYAxisLogarithmic(False)\n\n w.enableActiveCurveHandling(False)\n\n w.addImage(\n TEST_DATA,\n colormap={\n \"name\": \"temperature\",\n \"normalization\": \"linear\",\n \"autoscale\": True,\n \"vmin\": 0.0,\n \"vmax\": 
1.0,\n },\n )\n\n w.resetZoom()\n\n\n# attic #######################################################################\n\n\ndef attic():\n # Second plot\n w2 = PlotWindow.PlotWindow(parent=None, backend=backend)\n w2.setGraphTitle(\"Title 2\")\n w2.setGraphXLabel(\"Rows 2\")\n w2.setGraphYLabel(\"Columns 2\")\n\n size = 500\n dataList = [\n np.arange(float(size) * size, dtype=np.dtype(np.float32)),\n np.random.random_sample(size * size).astype(np.float32),\n ]\n dataList[0].shape = size, size\n dataList[1].shape = size, size\n dataList[1].dtype = np.dtype(np.float32)\n xData = np.arange(2000)\n yData = [np.random.random(2000) * 500, np.random.random(2000) * 500]\n counter = 0\n times = []\n\n def timerTest():\n global counter, times\n colormap = {\n \"name\": \"temperature\",\n \"normalization\": \"linear\",\n \"autoscale\": True,\n \"vmin\": 0.0,\n \"vmax\": 1.0,\n \"colors\": 256,\n }\n w2.addImage(\n dataList[counter % len(dataList)],\n legend=\"image\",\n xScale=(0, 1.0),\n yScale=(0, 1.0),\n replace=False,\n replot=True,\n colormap=colormap,\n )\n w2.addCurve(\n xData,\n yData[counter % len(yData)],\n legend=\"curve\",\n replace=False,\n replot=True,\n color=\"black\",\n )\n counter += 1\n if len(times) < 10:\n times.append(time.time())\n else:\n times.append(time.time())\n # fps = len(times) / (times[-1] - times[0])\n times.pop(0)\n # print('FPS', fps)\n\n timer = qt.QtCore.QTimer()\n qt.QtCore.QObject.connect(timer, qt.QtCore.SIGNAL(\"timeout()\"), timerTest)\n\n timer.start(1000)\n\n w2.getWidgetHandle().show()\n\n\nTEST_DATA = np.array(\n (\n 226,\n 228,\n 242,\n 241,\n 235,\n 239,\n 198,\n 219,\n 248,\n 224,\n 248,\n 216,\n 226,\n 257,\n 208,\n 223,\n 216,\n 221,\n 218,\n 229,\n 248,\n 238,\n 216,\n 259,\n 234,\n 231,\n 217,\n 229,\n 235,\n 204,\n 225,\n 240,\n 238,\n 235,\n 213,\n 225,\n 245,\n 214,\n 240,\n 232,\n 254,\n 235,\n 245,\n 233,\n 254,\n 234,\n 227,\n 215,\n 227,\n 249,\n 236,\n 237,\n 226,\n 220,\n 241,\n 219,\n 213,\n 225,\n 258,\n 191,\n 
236,\n 224,\n 214,\n 206,\n 238,\n 255,\n 217,\n 225,\n 240,\n 249,\n 237,\n 214,\n 214,\n 241,\n 220,\n 244,\n 241,\n 239,\n 249,\n 224,\n 231,\n 214,\n 246,\n 231,\n 211,\n 239,\n 221,\n 226,\n 224,\n 225,\n 235,\n 204,\n 236,\n 222,\n 243,\n 229,\n 226,\n 213,\n 232,\n 237,\n 220,\n 206,\n 211,\n 220,\n 225,\n 249,\n 260,\n 230,\n 238,\n 228,\n 206,\n 210,\n 223,\n 229,\n 223,\n 223,\n 246,\n 254,\n 235,\n 207,\n 212,\n 250,\n 223,\n 222,\n 240,\n 241,\n 237,\n 233,\n 229,\n 234,\n 190,\n 236,\n 242,\n 240,\n 209,\n 229,\n 207,\n 230,\n 240,\n 234,\n 208,\n 216,\n 233,\n 234,\n 226,\n 256,\n 237,\n 247,\n 225,\n 210,\n 224,\n 257,\n 237,\n 251,\n 237,\n 220,\n 243,\n 206,\n 222,\n 240,\n 264,\n 230,\n 213,\n 247,\n 238,\n 234,\n 225,\n 240,\n 260,\n 228,\n 214,\n 225,\n 219,\n 259,\n 248,\n 247,\n 245,\n 254,\n 229,\n 229,\n 242,\n 227,\n 244,\n 240,\n 214,\n 235,\n 217,\n 246,\n 246,\n 217,\n 236,\n 216,\n 215,\n 212,\n 200,\n 233,\n 263,\n 231,\n 253,\n 222,\n 212,\n 199,\n 261,\n 232,\n 206,\n 228,\n 202,\n 195,\n 228,\n 230,\n 210,\n 209,\n 235,\n 213,\n 234,\n 249,\n 187,\n 200,\n 202,\n 211,\n 234,\n 225,\n 244,\n 226,\n 242,\n 240,\n 201,\n 227,\n 209,\n 231,\n 221,\n 245,\n 224,\n 199,\n 231,\n 227,\n 242,\n 236,\n 214,\n 234,\n 242,\n 241,\n 233,\n 224,\n 226,\n 250,\n 188,\n 231,\n 221,\n 235,\n 234,\n 210,\n 232,\n 230,\n 191,\n 219,\n 248,\n 230,\n 220,\n 249,\n 195,\n 226,\n 224,\n 219,\n 217,\n 202,\n 254,\n 235,\n 212,\n 230,\n 237,\n 238,\n 234,\n 235,\n 223,\n 230,\n 211,\n 236,\n 242,\n 248,\n 246,\n 247,\n 250,\n 281,\n 253,\n 236,\n 218,\n 211,\n 231,\n 200,\n 224,\n 208,\n 237,\n 239,\n 222,\n 219,\n 212,\n 225,\n 226,\n 221,\n 227,\n 256,\n 245,\n 233,\n 224,\n 206,\n 224,\n 200,\n 247,\n 208,\n 227,\n 218,\n 196,\n 243,\n 236,\n 234,\n 215,\n 217,\n 224,\n 245,\n 225,\n 220,\n 247,\n 230,\n 247,\n 213,\n 246,\n 228,\n 233,\n 207,\n 224,\n 256,\n 201,\n 222,\n 211,\n 245,\n 218,\n 227,\n 240,\n 197,\n 256,\n 195,\n 208,\n 257,\n 230,\n 
222,\n 239,\n 222,\n 203,\n 198,\n 225,\n 205,\n 235,\n 204,\n 264,\n 243,\n 231,\n 235,\n 221,\n 258,\n 239,\n 216,\n 260,\n 237,\n 216,\n 238,\n 228,\n 226,\n 216,\n 243,\n 241,\n 216,\n 216,\n 222,\n 240,\n 226,\n 224,\n 205,\n 203,\n 230,\n 243,\n 212,\n 221,\n 239,\n 241,\n 241,\n 204,\n 201,\n 241,\n 217,\n 212,\n 237,\n 237,\n 233,\n 225,\n 239,\n 225,\n 229,\n 301,\n 397,\n 597,\n 587,\n 369,\n 275,\n 247,\n 238,\n 240,\n 227,\n 224,\n 230,\n 238,\n 230,\n 214,\n 211,\n 237,\n 243,\n 244,\n 228,\n 238,\n 206,\n 230,\n 215,\n 203,\n 216,\n 233,\n 239,\n 222,\n 252,\n 244,\n 254,\n 218,\n 222,\n 240,\n 202,\n 250,\n 197,\n 228,\n 207,\n 230,\n 205,\n 210,\n 247,\n 236,\n 248,\n 219,\n 271,\n 221,\n 227,\n 216,\n 227,\n 222,\n 215,\n 227,\n 216,\n 254,\n 235,\n 215,\n 242,\n 231,\n 230,\n 246,\n 215,\n 225,\n 233,\n 257,\n 225,\n 229,\n 227,\n 196,\n 220,\n 238,\n 228,\n 202,\n 211,\n 237,\n 229,\n 215,\n 235,\n 186,\n 216,\n 240,\n 256,\n 228,\n 247,\n 238,\n 210,\n 245,\n 244,\n 213,\n 238,\n 234,\n 233,\n 203,\n 236,\n 223,\n 253,\n 236,\n 264,\n 216,\n 206,\n 239,\n 232,\n 201,\n 249,\n 242,\n 211,\n 231,\n 219,\n 197,\n 253,\n 235,\n 206,\n 234,\n 215,\n 233,\n 261,\n 240,\n 386,\n 1725,\n 5792,\n 5959,\n 3810,\n 965,\n 295,\n 251,\n 264,\n 256,\n 219,\n 247,\n 245,\n 221,\n 218,\n 222,\n 236,\n 219,\n 235,\n 210,\n 207,\n 259,\n 234,\n 212,\n 228,\n 226,\n 258,\n 199,\n 222,\n 220,\n 225,\n 222,\n 202,\n 242,\n 229,\n 234,\n 210,\n 229,\n 200,\n 241,\n 213,\n 240,\n 226,\n 226,\n 209,\n 227,\n 208,\n 254,\n 220,\n 219,\n 182,\n 206,\n 234,\n 227,\n 215,\n 217,\n 220,\n 213,\n 253,\n 216,\n 232,\n 208,\n 225,\n 232,\n 234,\n 225,\n 210,\n 203,\n 264,\n 237,\n 204,\n 221,\n 213,\n 258,\n 231,\n 215,\n 225,\n 244,\n 223,\n 228,\n 217,\n 248,\n 226,\n 236,\n 201,\n 240,\n 207,\n 233,\n 215,\n 230,\n 231,\n 250,\n 219,\n 240,\n 205,\n 235,\n 221,\n 217,\n 222,\n 227,\n 251,\n 237,\n 224,\n 214,\n 235,\n 233,\n 255,\n 217,\n 235,\n 230,\n 233,\n 231,\n 233,\n 
214,\n 236,\n 235,\n 270,\n 274,\n 499,\n 4198,\n 9829,\n 8252,\n 9536,\n 4991,\n 741,\n 252,\n 252,\n 228,\n 220,\n 235,\n 209,\n 222,\n 205,\n 228,\n 231,\n 264,\n 242,\n 236,\n 233,\n 248,\n 218,\n 251,\n 246,\n 220,\n 249,\n 234,\n 223,\n 225,\n 253,\n 224,\n 204,\n 225,\n 213,\n 226,\n 223,\n 206,\n 251,\n 249,\n 216,\n 216,\n 205,\n 203,\n 235,\n 231,\n 236,\n 202,\n 223,\n 235,\n 214,\n 238,\n 222,\n 240,\n 252,\n 220,\n 232,\n 242,\n 205,\n 229,\n 206,\n 226,\n 237,\n 211,\n 234,\n 220,\n 233,\n 255,\n 230,\n 209,\n 234,\n 220,\n 243,\n 183,\n 218,\n 236,\n 216,\n 232,\n 238,\n 236,\n 220,\n 219,\n 234,\n 192,\n 210,\n 229,\n 255,\n 255,\n 220,\n 206,\n 222,\n 238,\n 241,\n 216,\n 237,\n 229,\n 225,\n 235,\n 249,\n 255,\n 223,\n 224,\n 234,\n 229,\n 215,\n 212,\n 240,\n 249,\n 222,\n 226,\n 238,\n 225,\n 236,\n 226,\n 221,\n 231,\n 256,\n 261,\n 255,\n 548,\n 3225,\n 8745,\n 6302,\n 7572,\n 14054,\n 4268,\n 547,\n 341,\n 271,\n 289,\n 259,\n 260,\n 239,\n 264,\n 240,\n 254,\n 231,\n 244,\n 222,\n 249,\n 222,\n 202,\n 218,\n 250,\n 221,\n 235,\n 217,\n 223,\n 214,\n 224,\n 210,\n 224,\n 250,\n 210,\n 240,\n 214,\n 235,\n 250,\n 198,\n 221,\n 217,\n 184,\n 237,\n 210,\n 204,\n 226,\n 233,\n 218,\n 211,\n 224,\n 209,\n 241,\n 257,\n 236,\n 224,\n 244,\n 235,\n 223,\n 223,\n 223,\n 266,\n 234,\n 238,\n 207,\n 223,\n 230,\n 221,\n 228,\n 226,\n 216,\n 205,\n 203,\n 239,\n 231,\n 215,\n 221,\n 225,\n 217,\n 218,\n 224,\n 239,\n 224,\n 226,\n 241,\n 207,\n 240,\n 221,\n 222,\n 222,\n 224,\n 229,\n 236,\n 235,\n 215,\n 228,\n 243,\n 202,\n 216,\n 219,\n 231,\n 239,\n 227,\n 237,\n 225,\n 238,\n 246,\n 236,\n 264,\n 260,\n 262,\n 242,\n 252,\n 220,\n 285,\n 297,\n 382,\n 638,\n 1440,\n 3705,\n 6656,\n 6965,\n 6215,\n 13217,\n 9489,\n 2579,\n 1206,\n 1209,\n 1189,\n 1152,\n 1060,\n 964,\n 790,\n 585,\n 450,\n 347,\n 303,\n 227,\n 236,\n 202,\n 216,\n 245,\n 218,\n 230,\n 242,\n 233,\n 211,\n 215,\n 224,\n 227,\n 208,\n 223,\n 214,\n 224,\n 193,\n 248,\n 236,\n 236,\n 
191,\n 233,\n 217,\n 243,\n 208,\n 221,\n 213,\n 222,\n 227,\n 239,\n 219,\n 253,\n 233,\n 228,\n 219,\n 234,\n 235,\n 235,\n 222,\n 239,\n 220,\n 211,\n 219,\n 211,\n 225,\n 226,\n 269,\n 192,\n 239,\n 234,\n 228,\n 230,\n 217,\n 234,\n 213,\n 214,\n 231,\n 209,\n 225,\n 218,\n 173,\n 202,\n 234,\n 236,\n 217,\n 229,\n 203,\n 257,\n 224,\n 225,\n 214,\n 235,\n 227,\n 221,\n 233,\n 203,\n 209,\n 219,\n 228,\n 201,\n 266,\n 247,\n 232,\n 222,\n 244,\n 232,\n 265,\n 258,\n 275,\n 251,\n 241,\n 231,\n 313,\n 398,\n 527,\n 738,\n 1187,\n 1823,\n 2703,\n 3010,\n 3563,\n 4929,\n 6175,\n 7530,\n 8699,\n 10691,\n 5792,\n 1905,\n 1215,\n 1204,\n 1196,\n 1251,\n 1209,\n 1249,\n 1451,\n 1783,\n 1847,\n 1460,\n 799,\n 497,\n 319,\n 271,\n 209,\n 246,\n 214,\n 205,\n 213,\n 262,\n 237,\n 240,\n 240,\n 221,\n 243,\n 198,\n 232,\n 252,\n 209,\n 235,\n 194,\n 195,\n 223,\n 233,\n 227,\n 210,\n 225,\n 185,\n 222,\n 220,\n 226,\n 206,\n 253,\n 218,\n 241,\n 228,\n 238,\n 225,\n 211,\n 230,\n 200,\n 253,\n 223,\n 215,\n 217,\n 233,\n 217,\n 234,\n 220,\n 247,\n 239,\n 237,\n 242,\n 228,\n 254,\n 241,\n 232,\n 216,\n 259,\n 261,\n 208,\n 219,\n 222,\n 214,\n 238,\n 223,\n 236,\n 236,\n 236,\n 221,\n 221,\n 216,\n 197,\n 231,\n 228,\n 233,\n 231,\n 229,\n 226,\n 226,\n 225,\n 228,\n 225,\n 230,\n 274,\n 267,\n 319,\n 284,\n 336,\n 376,\n 526,\n 887,\n 1386,\n 1875,\n 2044,\n 1917,\n 2102,\n 2440,\n 2905,\n 3546,\n 3773,\n 4548,\n 8639,\n 10594,\n 10138,\n 10459,\n 9246,\n 3735,\n 1792,\n 1508,\n 1325,\n 1307,\n 1111,\n 1039,\n 1166,\n 1132,\n 1290,\n 1414,\n 1485,\n 1550,\n 1632,\n 984,\n 449,\n 307,\n 243,\n 236,\n 224,\n 208,\n 243,\n 247,\n 218,\n 221,\n 238,\n 221,\n 212,\n 202,\n 229,\n 226,\n 242,\n 232,\n 239,\n 233,\n 231,\n 225,\n 220,\n 213,\n 212,\n 231,\n 218,\n 238,\n 237,\n 231,\n 238,\n 213,\n 245,\n 209,\n 236,\n 224,\n 215,\n 201,\n 200,\n 223,\n 203,\n 203,\n 231,\n 218,\n 224,\n 223,\n 200,\n 218,\n 228,\n 234,\n 212,\n 228,\n 234,\n 220,\n 227,\n 210,\n 226,\n 
225,\n 214,\n 220,\n 232,\n 227,\n 239,\n 233,\n 208,\n 239,\n 241,\n 241,\n 232,\n 246,\n 260,\n 233,\n 218,\n 237,\n 244,\n 256,\n 213,\n 235,\n 222,\n 265,\n 256,\n 286,\n 394,\n 964,\n 1304,\n 1664,\n 2538,\n 3115,\n 2939,\n 2589,\n 2205,\n 2295,\n 2774,\n 3467,\n 4305,\n 4756,\n 3716,\n 3318,\n 3136,\n 5070,\n 6165,\n 5558,\n 6656,\n 10540,\n 4836,\n 2040,\n 2092,\n 2524,\n 2900,\n 2764,\n 1983,\n 1444,\n 1190,\n 1161,\n 1146,\n 1402,\n 1603,\n 1625,\n 1429,\n 1434,\n 1350,\n 819,\n 379,\n 288,\n 241,\n 228,\n 242,\n 224,\n 254,\n 219,\n 262,\n 231,\n 205,\n 209,\n 235,\n 230,\n 218,\n 252,\n 229,\n 240,\n 243,\n 215,\n 203,\n 194,\n 224,\n 228,\n 200,\n 220,\n 220,\n 216,\n 211,\n 208,\n 218,\n 214,\n 243,\n 241,\n 212,\n 239,\n 231,\n 203,\n 205,\n 216,\n 210,\n 231,\n 198,\n 242,\n 232,\n 195,\n 227,\n 212,\n 234,\n 230,\n 209,\n 242,\n 219,\n 222,\n 208,\n 229,\n 244,\n 228,\n 205,\n 246,\n 234,\n 231,\n 230,\n 221,\n 237,\n 225,\n 242,\n 238,\n 225,\n 212,\n 242,\n 205,\n 229,\n 218,\n 246,\n 238,\n 244,\n 297,\n 986,\n 5326,\n 11440,\n 14208,\n 10889,\n 8389,\n 6146,\n 5526,\n 5608,\n 6183,\n 7177,\n 7727,\n 7702,\n 7060,\n 6040,\n 4775,\n 3793,\n 3340,\n 3318,\n 3667,\n 4870,\n 7658,\n 6513,\n 2217,\n 1212,\n 1051,\n 1109,\n 1213,\n 1356,\n 1820,\n 2734,\n 2675,\n 1900,\n 1551,\n 1305,\n 1324,\n 1619,\n 1645,\n 1543,\n 1347,\n 1307,\n 1268,\n 572,\n 306,\n 246,\n 240,\n 234,\n 228,\n 214,\n 218,\n 222,\n 209,\n 227,\n 264,\n 201,\n 224,\n 214,\n 210,\n 222,\n 205,\n 220,\n 237,\n 205,\n 225,\n 213,\n 214,\n 222,\n 180,\n 235,\n 226,\n 255,\n 207,\n 249,\n 204,\n 222,\n 228,\n 213,\n 204,\n 224,\n 258,\n 207,\n 234,\n 232,\n 211,\n 237,\n 211,\n 216,\n 224,\n 207,\n 196,\n 233,\n 209,\n 221,\n 230,\n 240,\n 205,\n 236,\n 201,\n 221,\n 223,\n 235,\n 248,\n 253,\n 198,\n 218,\n 253,\n 252,\n 215,\n 256,\n 211,\n 214,\n 230,\n 232,\n 230,\n 230,\n 237,\n 238,\n 237,\n 486,\n 3444,\n 8578,\n 9163,\n 9582,\n 10298,\n 10359,\n 10170,\n 9255,\n 9090,\n 8527,\n 
7653,\n 6917,\n 6343,\n 4964,\n 4462,\n 3790,\n 2899,\n 2950,\n 2898,\n 2903,\n 4103,\n 5277,\n 5420,\n 2810,\n 961,\n 782,\n 755,\n 764,\n 774,\n 885,\n 937,\n 1085,\n 1593,\n 2554,\n 2801,\n 1932,\n 1475,\n 1423,\n 1336,\n 1353,\n 1217,\n 1381,\n 1524,\n 1097,\n 467,\n 287,\n 229,\n 228,\n 234,\n 230,\n 211,\n 234,\n 227,\n 230,\n 224,\n 213,\n 226,\n 194,\n 213,\n 220,\n 225,\n 233,\n 227,\n 210,\n 227,\n 209,\n 253,\n 207,\n 244,\n 223,\n 217,\n 207,\n 221,\n 220,\n 206,\n 238,\n 213,\n 216,\n 210,\n 208,\n 221,\n 238,\n 260,\n 215,\n 216,\n 206,\n 226,\n 210,\n 221,\n 232,\n 232,\n 216,\n 230,\n 224,\n 231,\n 220,\n 204,\n 241,\n 221,\n 225,\n 206,\n 194,\n 229,\n 225,\n 217,\n 237,\n 201,\n 229,\n 234,\n 206,\n 220,\n 214,\n 216,\n 238,\n 226,\n 237,\n 250,\n 314,\n 1252,\n 5975,\n 7807,\n 8075,\n 8168,\n 9170,\n 7907,\n 8083,\n 7417,\n 6168,\n 6163,\n 5724,\n 5452,\n 5662,\n 5890,\n 4934,\n 4151,\n 3663,\n 2903,\n 3120,\n 3674,\n 3717,\n 3734,\n 4891,\n 4135,\n 1307,\n 797,\n 739,\n 653,\n 675,\n 671,\n 757,\n 799,\n 869,\n 917,\n 1076,\n 1422,\n 2703,\n 2721,\n 1794,\n 1420,\n 1264,\n 1000,\n 1106,\n 1313,\n 1434,\n 1154,\n 456,\n 265,\n 261,\n 249,\n 195,\n 207,\n 246,\n 209,\n 255,\n 217,\n 212,\n 214,\n 226,\n 256,\n 219,\n 228,\n 202,\n 215,\n 211,\n 228,\n 222,\n 220,\n 194,\n 255,\n 215,\n 227,\n 235,\n 219,\n 249,\n 242,\n 236,\n 240,\n 216,\n 212,\n 249,\n 249,\n 201,\n 213,\n 207,\n 211,\n 238,\n 231,\n 205,\n 207,\n 226,\n 249,\n 235,\n 215,\n 197,\n 227,\n 204,\n 241,\n 218,\n 249,\n 218,\n 243,\n 238,\n 233,\n 258,\n 233,\n 230,\n 220,\n 243,\n 214,\n 225,\n 230,\n 218,\n 217,\n 231,\n 262,\n 371,\n 1671,\n 4440,\n 5789,\n 7201,\n 8389,\n 7752,\n 6504,\n 5903,\n 5115,\n 4379,\n 3689,\n 4388,\n 3916,\n 3912,\n 3899,\n 3860,\n 4101,\n 4699,\n 4286,\n 4542,\n 4202,\n 3610,\n 2967,\n 3690,\n 5294,\n 4768,\n 1477,\n 968,\n 875,\n 693,\n 605,\n 717,\n 683,\n 762,\n 781,\n 760,\n 792,\n 837,\n 1004,\n 1436,\n 2763,\n 2912,\n 1767,\n 1205,\n 985,\n 
1011,\n 1319,\n 1393,\n 1350,\n 707,\n 355,\n 249,\n 246,\n 235,\n 226,\n 202,\n 217,\n 246,\n 195,\n 196,\n 197,\n 226,\n 199,\n 220,\n 222,\n 237,\n 200,\n 247,\n 227,\n 249,\n 214,\n 235,\n 228,\n 219,\n 210,\n 212,\n 218,\n 229,\n 246,\n 210,\n 221,\n 223,\n 229,\n 206,\n 246,\n 222,\n 228,\n 215,\n 212,\n 209,\n 224,\n 213,\n 251,\n 220,\n 234,\n 205,\n 256,\n 238,\n 250,\n 204,\n 228,\n 231,\n 205,\n 228,\n 233,\n 231,\n 224,\n 205,\n 243,\n 221,\n 237,\n 224,\n 222,\n 239,\n 241,\n 245,\n 225,\n 243,\n 338,\n 1920,\n 4102,\n 4342,\n 4321,\n 4661,\n 4463,\n 3822,\n 5256,\n 4984,\n 3770,\n 2576,\n 3214,\n 3274,\n 2852,\n 3078,\n 3331,\n 3160,\n 3088,\n 3405,\n 4255,\n 4189,\n 4201,\n 4865,\n 3687,\n 3386,\n 6757,\n 5850,\n 1974,\n 1292,\n 1092,\n 810,\n 717,\n 858,\n 849,\n 848,\n 787,\n 637,\n 724,\n 740,\n 850,\n 937,\n 1189,\n 1905,\n 2795,\n 2208,\n 1252,\n 1009,\n 1054,\n 1138,\n 1305,\n 1276,\n 792,\n 366,\n 239,\n 203,\n 225,\n 208,\n 219,\n 236,\n 233,\n 199,\n 201,\n 229,\n 213,\n 228,\n 222,\n 233,\n 225,\n 263,\n 241,\n 213,\n 213,\n 226,\n 232,\n 216,\n 233,\n 222,\n 220,\n 221,\n 219,\n 229,\n 234,\n 236,\n 253,\n 213,\n 247,\n 195,\n 234,\n 213,\n 224,\n 234,\n 212,\n 201,\n 233,\n 209,\n 237,\n 187,\n 231,\n 244,\n 218,\n 256,\n 237,\n 235,\n 229,\n 225,\n 221,\n 219,\n 224,\n 246,\n 258,\n 202,\n 208,\n 227,\n 221,\n 232,\n 232,\n 264,\n 357,\n 2275,\n 4047,\n 4073,\n 3666,\n 3365,\n 2446,\n 1858,\n 2143,\n 2759,\n 2035,\n 1756,\n 2473,\n 2403,\n 1578,\n 1480,\n 2254,\n 3951,\n 3259,\n 2489,\n 2087,\n 3028,\n 4867,\n 4646,\n 4240,\n 5130,\n 5119,\n 7501,\n 7040,\n 2605,\n 1673,\n 1157,\n 934,\n 878,\n 907,\n 994,\n 966,\n 866,\n 881,\n 731,\n 676,\n 789,\n 814,\n 899,\n 1061,\n 1606,\n 2504,\n 2434,\n 1453,\n 971,\n 869,\n 979,\n 1167,\n 1049,\n 859,\n 425,\n 233,\n 204,\n 237,\n 254,\n 213,\n 230,\n 227,\n 205,\n 202,\n 218,\n 230,\n 234,\n 240,\n 215,\n 210,\n 219,\n 211,\n 223,\n 232,\n 224,\n 244,\n 232,\n 218,\n 199,\n 198,\n 217,\n 217,\n 
245,\n 226,\n 231,\n 240,\n 231,\n 216,\n 244,\n 202,\n 235,\n 235,\n 233,\n 225,\n 216,\n 209,\n 216,\n 203,\n 238,\n 205,\n 240,\n 233,\n 192,\n 230,\n 198,\n 215,\n 229,\n 236,\n 209,\n 232,\n 213,\n 239,\n 240,\n 222,\n 215,\n 252,\n 253,\n 311,\n 1866,\n 4139,\n 3873,\n 3543,\n 3001,\n 1987,\n 1141,\n 1213,\n 1445,\n 1847,\n 2147,\n 1955,\n 1216,\n 772,\n 629,\n 641,\n 1022,\n 2632,\n 2184,\n 1273,\n 1483,\n 2651,\n 4010,\n 5380,\n 4387,\n 4490,\n 6689,\n 7376,\n 7479,\n 3814,\n 1993,\n 1109,\n 824,\n 836,\n 994,\n 1090,\n 1167,\n 1151,\n 1009,\n 880,\n 797,\n 815,\n 816,\n 805,\n 865,\n 1051,\n 1371,\n 2253,\n 2173,\n 1590,\n 902,\n 881,\n 874,\n 1127,\n 981,\n 861,\n 425,\n 241,\n 220,\n 210,\n 255,\n 215,\n 217,\n 231,\n 230,\n 204,\n 231,\n 219,\n 206,\n 215,\n 206,\n 222,\n 229,\n 221,\n 219,\n 208,\n 228,\n 218,\n 203,\n 220,\n 232,\n 197,\n 211,\n 176,\n 196,\n 222,\n 228,\n 222,\n 217,\n 238,\n 243,\n 192,\n 206,\n 219,\n 250,\n 219,\n 249,\n 230,\n 209,\n 215,\n 214,\n 245,\n 194,\n 231,\n 230,\n 203,\n 228,\n 217,\n 220,\n 211,\n 227,\n 204,\n 236,\n 212,\n 219,\n 209,\n 239,\n 274,\n 607,\n 3429,\n 3608,\n 3472,\n 3041,\n 2109,\n 1303,\n 1255,\n 1176,\n 1034,\n 1129,\n 976,\n 965,\n 767,\n 672,\n 581,\n 566,\n 535,\n 1044,\n 2135,\n 1026,\n 1446,\n 1894,\n 3081,\n 4243,\n 5570,\n 5343,\n 5351,\n 6383,\n 7349,\n 6609,\n 3057,\n 1420,\n 881,\n 835,\n 929,\n 1220,\n 1351,\n 1388,\n 1279,\n 1161,\n 1025,\n 940,\n 819,\n 794,\n 691,\n 872,\n 1076,\n 1109,\n 1642,\n 2034,\n 1847,\n 1196,\n 883,\n 803,\n 854,\n 921,\n 881,\n 475,\n 287,\n 227,\n 223,\n 218,\n 219,\n 234,\n 216,\n 221,\n 222,\n 219,\n 203,\n 210,\n 232,\n 231,\n 227,\n 215,\n 196,\n 249,\n 238,\n 218,\n 180,\n 211,\n 216,\n 215,\n 218,\n 225,\n 216,\n 227,\n 205,\n 232,\n 211,\n 242,\n 228,\n 249,\n 219,\n 233,\n 206,\n 250,\n 223,\n 214,\n 206,\n 225,\n 226,\n 222,\n 233,\n 211,\n 238,\n 219,\n 205,\n 241,\n 229,\n 231,\n 238,\n 261,\n 204,\n 201,\n 219,\n 210,\n 263,\n 267,\n 1327,\n 
3855,\n 3536,\n 3165,\n 2334,\n 1494,\n 1263,\n 1170,\n 987,\n 1016,\n 1041,\n 1086,\n 1100,\n 849,\n 619,\n 495,\n 496,\n 553,\n 508,\n 794,\n 1267,\n 1505,\n 2003,\n 2371,\n 2984,\n 4712,\n 6219,\n 6349,\n 5224,\n 6024,\n 7875,\n 5625,\n 2095,\n 1276,\n 1112,\n 1040,\n 1241,\n 1430,\n 1534,\n 1496,\n 1385,\n 1307,\n 1131,\n 1041,\n 956,\n 845,\n 757,\n 822,\n 877,\n 977,\n 1191,\n 1919,\n 1920,\n 1546,\n 897,\n 731,\n 694,\n 810,\n 869,\n 592,\n 266,\n 233,\n 220,\n 231,\n 247,\n 196,\n 226,\n 217,\n 217,\n 217,\n 229,\n 225,\n 214,\n 209,\n 216,\n 212,\n 221,\n 231,\n 230,\n 257,\n 214,\n 227,\n 204,\n 254,\n 219,\n 221,\n 214,\n 233,\n 207,\n 206,\n 215,\n 230,\n 246,\n 198,\n 226,\n 228,\n 206,\n 224,\n 211,\n 239,\n 193,\n 212,\n 233,\n 232,\n 205,\n 213,\n 229,\n 204,\n 204,\n 241,\n 219,\n 222,\n 260,\n 219,\n 255,\n 240,\n 249,\n 232,\n 484,\n 2790,\n 3645,\n 3311,\n 2445,\n 1846,\n 1451,\n 885,\n 971,\n 1046,\n 1204,\n 1175,\n 1155,\n 1119,\n 904,\n 555,\n 571,\n 516,\n 487,\n 428,\n 482,\n 799,\n 1459,\n 1875,\n 2128,\n 2249,\n 3145,\n 4265,\n 4765,\n 5698,\n 5407,\n 5484,\n 6546,\n 4362,\n 2043,\n 1580,\n 1536,\n 1497,\n 1551,\n 1675,\n 1624,\n 1720,\n 1511,\n 1384,\n 1207,\n 1129,\n 923,\n 822,\n 772,\n 760,\n 786,\n 873,\n 1034,\n 1486,\n 1869,\n 1746,\n 1008,\n 680,\n 657,\n 701,\n 869,\n 745,\n 300,\n 230,\n 251,\n 198,\n 198,\n 214,\n 220,\n 242,\n 213,\n 217,\n 233,\n 215,\n 251,\n 214,\n 222,\n 235,\n 229,\n 212,\n 261,\n 205,\n 233,\n 259,\n 228,\n 224,\n 213,\n 225,\n 210,\n 206,\n 203,\n 220,\n 229,\n 230,\n 230,\n 205,\n 218,\n 234,\n 245,\n 236,\n 222,\n 232,\n 240,\n 231,\n 221,\n 214,\n 222,\n 215,\n 227,\n 209,\n 222,\n 215,\n 230,\n 248,\n 207,\n 225,\n 223,\n 266,\n 299,\n 1102,\n 3437,\n 3128,\n 2462,\n 2168,\n 1445,\n 934,\n 754,\n 972,\n 1242,\n 1305,\n 1280,\n 1107,\n 1115,\n 1096,\n 728,\n 463,\n 471,\n 533,\n 458,\n 437,\n 692,\n 1535,\n 1447,\n 1460,\n 1637,\n 1779,\n 2011,\n 2764,\n 3903,\n 5925,\n 5124,\n 5383,\n 5057,\n 
3763,\n 2124,\n 1811,\n 1912,\n 2009,\n 1903,\n 1910,\n 1960,\n 1724,\n 1512,\n 1449,\n 1332,\n 1087,\n 905,\n 690,\n 764,\n 672,\n 740,\n 827,\n 841,\n 972,\n 1823,\n 1864,\n 1360,\n 724,\n 633,\n 695,\n 827,\n 832,\n 343,\n 238,\n 214,\n 210,\n 231,\n 240,\n 230,\n 222,\n 223,\n 218,\n 182,\n 218,\n 215,\n 206,\n 215,\n 218,\n 241,\n 219,\n 194,\n 200,\n 218,\n 226,\n 193,\n 221,\n 216,\n 241,\n 239,\n 214,\n 212,\n 204,\n 206,\n 213,\n 214,\n 207,\n 236,\n 231,\n 196,\n 230,\n 239,\n 207,\n 241,\n 217,\n 216,\n 229,\n 216,\n 216,\n 230,\n 209,\n 218,\n 228,\n 226,\n 212,\n 229,\n 218,\n 252,\n 459,\n 1000,\n 2729,\n 2721,\n 2203,\n 1823,\n 1305,\n 913,\n 700,\n 856,\n 1099,\n 1222,\n 1191,\n 1178,\n 1124,\n 1113,\n 999,\n 666,\n 464,\n 463,\n 503,\n 474,\n 503,\n 859,\n 1349,\n 1408,\n 1389,\n 1576,\n 1451,\n 1539,\n 2246,\n 3507,\n 5175,\n 5337,\n 5357,\n 4724,\n 4494,\n 3830,\n 2380,\n 2346,\n 2461,\n 2423,\n 2390,\n 2304,\n 2005,\n 1812,\n 1550,\n 1274,\n 1154,\n 1009,\n 747,\n 670,\n 637,\n 658,\n 729,\n 757,\n 844,\n 1067,\n 1956,\n 1806,\n 1185,\n 683,\n 618,\n 744,\n 941,\n 524,\n 270,\n 222,\n 232,\n 217,\n 226,\n 223,\n 207,\n 226,\n 222,\n 239,\n 205,\n 206,\n 183,\n 200,\n 225,\n 243,\n 227,\n 209,\n 220,\n 221,\n 217,\n 196,\n 208,\n 208,\n 222,\n 232,\n 225,\n 194,\n 231,\n 198,\n 212,\n 229,\n 224,\n 204,\n 214,\n 214,\n 216,\n 208,\n 238,\n 205,\n 216,\n 229,\n 229,\n 250,\n 216,\n 203,\n 215,\n 219,\n 221,\n 223,\n 235,\n 278,\n 506,\n 804,\n 1751,\n 2203,\n 1928,\n 1423,\n 957,\n 767,\n 695,\n 690,\n 943,\n 992,\n 1217,\n 1321,\n 1217,\n 1062,\n 938,\n 717,\n 423,\n 439,\n 447,\n 443,\n 448,\n 590,\n 777,\n 1252,\n 1447,\n 1441,\n 1645,\n 2065,\n 2191,\n 2197,\n 2536,\n 3962,\n 5049,\n 4674,\n 4839,\n 3109,\n 4561,\n 4011,\n 2558,\n 2495,\n 2655,\n 2722,\n 2714,\n 2055,\n 1788,\n 1680,\n 1566,\n 1282,\n 1043,\n 937,\n 874,\n 772,\n 693,\n 672,\n 672,\n 737,\n 816,\n 991,\n 1886,\n 1764,\n 1290,\n 759,\n 688,\n 818,\n 954,\n 387,\n 257,\n 235,\n 
209,\n 226,\n 215,\n 228,\n 236,\n 223,\n 237,\n 231,\n 227,\n 222,\n 214,\n 222,\n 219,\n 228,\n 203,\n 211,\n 224,\n 208,\n 212,\n 241,\n 205,\n 221,\n 214,\n 226,\n 216,\n 216,\n 237,\n 212,\n 241,\n 219,\n 233,\n 206,\n 205,\n 222,\n 207,\n 218,\n 236,\n 219,\n 202,\n 218,\n 229,\n 226,\n 230,\n 235,\n 240,\n 229,\n 208,\n 260,\n 309,\n 564,\n 722,\n 1837,\n 1892,\n 1304,\n 911,\n 627,\n 557,\n 533,\n 564,\n 785,\n 1063,\n 1204,\n 1413,\n 1370,\n 1113,\n 766,\n 576,\n 497,\n 432,\n 434,\n 474,\n 460,\n 471,\n 846,\n 988,\n 1552,\n 1495,\n 1616,\n 2314,\n 2786,\n 2771,\n 2577,\n 2982,\n 3835,\n 4552,\n 4702,\n 4577,\n 2866,\n 3666,\n 4750,\n 3103,\n 2637,\n 2688,\n 2665,\n 2679,\n 1911,\n 1742,\n 1610,\n 1509,\n 1468,\n 1128,\n 1050,\n 940,\n 859,\n 651,\n 677,\n 696,\n 703,\n 764,\n 969,\n 1613,\n 2091,\n 1659,\n 909,\n 739,\n 848,\n 915,\n 417,\n 256,\n 249,\n 228,\n 225,\n 236,\n 224,\n 219,\n 235,\n 195,\n 197,\n 229,\n 197,\n 217,\n 218,\n 260,\n 234,\n 227,\n 191,\n 209,\n 238,\n 209,\n 218,\n 201,\n 229,\n 222,\n 242,\n 218,\n 236,\n 213,\n 207,\n 241,\n 227,\n 217,\n 216,\n 219,\n 226,\n 222,\n 219,\n 206,\n 216,\n 235,\n 219,\n 209,\n 242,\n 213,\n 244,\n 222,\n 228,\n 345,\n 559,\n 423,\n 823,\n 1780,\n 1275,\n 825,\n 600,\n 516,\n 488,\n 476,\n 518,\n 583,\n 843,\n 1025,\n 1149,\n 1156,\n 862,\n 639,\n 586,\n 606,\n 499,\n 444,\n 459,\n 492,\n 437,\n 557,\n 952,\n 1557,\n 1919,\n 2098,\n 2626,\n 2877,\n 3020,\n 3046,\n 3027,\n 2627,\n 3097,\n 4402,\n 5255,\n 5234,\n 3024,\n 2741,\n 4533,\n 3417,\n 2694,\n 2810,\n 2700,\n 2837,\n 2058,\n 1720,\n 1638,\n 1477,\n 1379,\n 1281,\n 1208,\n 1023,\n 828,\n 738,\n 760,\n 661,\n 709,\n 754,\n 975,\n 1364,\n 2311,\n 1795,\n 1000,\n 721,\n 830,\n 919,\n 458,\n 250,\n 225,\n 210,\n 214,\n 243,\n 197,\n 218,\n 235,\n 203,\n 208,\n 247,\n 223,\n 238,\n 229,\n 214,\n 219,\n 187,\n 228,\n 220,\n 202,\n 231,\n 209,\n 207,\n 231,\n 245,\n 241,\n 208,\n 212,\n 233,\n 252,\n 198,\n 236,\n 193,\n 219,\n 216,\n 223,\n 
243,\n 222,\n 202,\n 229,\n 227,\n 196,\n 245,\n 214,\n 233,\n 214,\n 337,\n 520,\n 450,\n 665,\n 1406,\n 1037,\n 776,\n 599,\n 508,\n 414,\n 494,\n 473,\n 485,\n 481,\n 559,\n 693,\n 684,\n 602,\n 535,\n 503,\n 617,\n 684,\n 614,\n 568,\n 569,\n 491,\n 511,\n 660,\n 913,\n 1789,\n 2085,\n 2470,\n 2489,\n 2277,\n 2277,\n 2441,\n 2331,\n 2544,\n 2714,\n 4415,\n 5151,\n 5442,\n 4120,\n 2383,\n 3194,\n 4160,\n 2965,\n 2646,\n 2579,\n 2817,\n 2480,\n 1828,\n 1559,\n 1591,\n 1584,\n 1618,\n 1555,\n 1378,\n 1258,\n 1012,\n 832,\n 671,\n 680,\n 734,\n 808,\n 1008,\n 1838,\n 2226,\n 1481,\n 947,\n 847,\n 868,\n 621,\n 275,\n 220,\n 237,\n 225,\n 211,\n 235,\n 239,\n 216,\n 216,\n 199,\n 211,\n 204,\n 214,\n 222,\n 219,\n 207,\n 213,\n 227,\n 221,\n 208,\n 249,\n 238,\n 233,\n 252,\n 225,\n 224,\n 229,\n 208,\n 214,\n 221,\n 199,\n 213,\n 219,\n 236,\n 222,\n 237,\n 233,\n 226,\n 227,\n 227,\n 252,\n 203,\n 217,\n 255,\n 237,\n 251,\n 296,\n 393,\n 393,\n 677,\n 1226,\n 792,\n 688,\n 529,\n 465,\n 438,\n 490,\n 450,\n 478,\n 423,\n 439,\n 558,\n 564,\n 617,\n 553,\n 557,\n 687,\n 761,\n 822,\n 741,\n 603,\n 493,\n 580,\n 792,\n 1176,\n 1471,\n 1956,\n 1969,\n 2107,\n 1932,\n 1969,\n 2366,\n 3087,\n 3446,\n 3161,\n 3204,\n 4924,\n 5672,\n 5942,\n 4792,\n 2561,\n 3265,\n 4252,\n 2843,\n 2652,\n 2307,\n 2776,\n 2565,\n 1877,\n 1685,\n 1696,\n 1827,\n 2143,\n 1866,\n 1920,\n 1529,\n 1138,\n 865,\n 706,\n 658,\n 638,\n 845,\n 1029,\n 1716,\n 2352,\n 1584,\n 982,\n 871,\n 929,\n 475,\n 243,\n 253,\n 245,\n 247,\n 200,\n 248,\n 228,\n 205,\n 213,\n 239,\n 204,\n 203,\n 205,\n 217,\n 249,\n 221,\n 230,\n 209,\n 242,\n 207,\n 228,\n 220,\n 239,\n 187,\n 229,\n 229,\n 206,\n 247,\n 220,\n 226,\n 264,\n 226,\n 214,\n 230,\n 199,\n 229,\n 208,\n 225,\n 234,\n 229,\n 250,\n 253,\n 213,\n 203,\n 263,\n 293,\n 260,\n 266,\n 815,\n 1068,\n 854,\n 635,\n 507,\n 477,\n 457,\n 500,\n 477,\n 475,\n 496,\n 469,\n 556,\n 552,\n 575,\n 591,\n 494,\n 622,\n 733,\n 869,\n 842,\n 741,\n 590,\n 
702,\n 991,\n 1077,\n 1553,\n 2198,\n 2048,\n 2128,\n 2465,\n 2823,\n 2766,\n 3070,\n 3221,\n 3215,\n 2867,\n 3296,\n 5082,\n 5750,\n 6087,\n 5326,\n 3462,\n 3411,\n 4488,\n 3072,\n 2564,\n 2228,\n 2742,\n 2485,\n 1874,\n 1684,\n 1819,\n 2097,\n 2626,\n 1987,\n 2273,\n 1781,\n 1162,\n 843,\n 638,\n 617,\n 641,\n 705,\n 836,\n 1431,\n 2274,\n 1576,\n 945,\n 850,\n 926,\n 396,\n 241,\n 235,\n 211,\n 254,\n 196,\n 214,\n 248,\n 202,\n 230,\n 216,\n 213,\n 207,\n 207,\n 233,\n 196,\n 184,\n 234,\n 202,\n 206,\n 206,\n 219,\n 215,\n 222,\n 228,\n 237,\n 217,\n 200,\n 210,\n 206,\n 220,\n 224,\n 222,\n 210,\n 234,\n 233,\n 231,\n 253,\n 210,\n 211,\n 216,\n 242,\n 247,\n 208,\n 275,\n 299,\n 234,\n 357,\n 1075,\n 904,\n 744,\n 610,\n 503,\n 476,\n 459,\n 497,\n 488,\n 471,\n 424,\n 492,\n 545,\n 530,\n 500,\n 593,\n 560,\n 522,\n 521,\n 715,\n 870,\n 737,\n 682,\n 801,\n 1083,\n 1085,\n 1575,\n 2453,\n 2550,\n 3059,\n 3167,\n 2805,\n 2771,\n 2660,\n 2149,\n 1950,\n 2081,\n 2517,\n 2989,\n 4574,\n 5026,\n 5962,\n 4770,\n 3650,\n 3750,\n 4396,\n 2822,\n 2313,\n 2394,\n 2856,\n 2261,\n 1869,\n 1695,\n 1744,\n 1970,\n 2450,\n 1858,\n 2038,\n 1542,\n 997,\n 646,\n 627,\n 625,\n 549,\n 703,\n 845,\n 1276,\n 2169,\n 1396,\n 1059,\n 931,\n 880,\n 314,\n 210,\n 211,\n 196,\n 214,\n 230,\n 205,\n 229,\n 210,\n 250,\n 252,\n 237,\n 229,\n 196,\n 223,\n 197,\n 230,\n 229,\n 226,\n 221,\n 211,\n 220,\n 209,\n 230,\n 200,\n 206,\n 230,\n 207,\n 195,\n 217,\n 238,\n 200,\n 204,\n 218,\n 239,\n 221,\n 252,\n 216,\n 208,\n 242,\n 203,\n 202,\n 209,\n 264,\n 251,\n 238,\n 729,\n 1037,\n 796,\n 685,\n 610,\n 517,\n 485,\n 501,\n 500,\n 497,\n 473,\n 409,\n 409,\n 445,\n 466,\n 477,\n 443,\n 490,\n 477,\n 491,\n 565,\n 727,\n 753,\n 757,\n 974,\n 1039,\n 1244,\n 1838,\n 2493,\n 3311,\n 3252,\n 2683,\n 2753,\n 2402,\n 2123,\n 1982,\n 1958,\n 2272,\n 2878,\n 2940,\n 3473,\n 4551,\n 4888,\n 4839,\n 4787,\n 3268,\n 4275,\n 3976,\n 2579,\n 2183,\n 2619,\n 2699,\n 1990,\n 1714,\n 1668,\n 1618,\n 
1834,\n 2067,\n 1589,\n 1809,\n 1421,\n 844,\n 606,\n 506,\n 548,\n 643,\n 695,\n 834,\n 1372,\n 2084,\n 1289,\n 1170,\n 964,\n 528,\n 289,\n 228,\n 224,\n 213,\n 249,\n 207,\n 199,\n 223,\n 192,\n 219,\n 214,\n 203,\n 234,\n 223,\n 234,\n 241,\n 230,\n 207,\n 223,\n 241,\n 244,\n 217,\n 234,\n 220,\n 203,\n 228,\n 229,\n 227,\n 224,\n 224,\n 201,\n 217,\n 214,\n 217,\n 220,\n 247,\n 212,\n 205,\n 241,\n 235,\n 218,\n 214,\n 297,\n 287,\n 251,\n 423,\n 1004,\n 870,\n 756,\n 711,\n 581,\n 490,\n 451,\n 479,\n 514,\n 484,\n 541,\n 522,\n 422,\n 456,\n 493,\n 430,\n 461,\n 412,\n 460,\n 507,\n 504,\n 599,\n 760,\n 974,\n 986,\n 1105,\n 1584,\n 3054,\n 3985,\n 3232,\n 3362,\n 1984,\n 1427,\n 1502,\n 1709,\n 1972,\n 1976,\n 1942,\n 1878,\n 2560,\n 3049,\n 3880,\n 4216,\n 3515,\n 4412,\n 3064,\n 3521,\n 4554,\n 3062,\n 2387,\n 2699,\n 2861,\n 1859,\n 1566,\n 1483,\n 1316,\n 1252,\n 1618,\n 1601,\n 1239,\n 1755,\n 941,\n 582,\n 559,\n 597,\n 676,\n 729,\n 848,\n 1104,\n 2020,\n 1702,\n 1201,\n 1012,\n 726,\n 310,\n 234,\n 247,\n 223,\n 213,\n 223,\n 204,\n 223,\n 221,\n 209,\n 201,\n 210,\n 208,\n 202,\n 224,\n 203,\n 191,\n 239,\n 240,\n 232,\n 201,\n 213,\n 211,\n 223,\n 224,\n 240,\n 229,\n 230,\n 197,\n 246,\n 229,\n 226,\n 210,\n 209,\n 210,\n 246,\n 213,\n 211,\n 246,\n 194,\n 260,\n 274,\n 242,\n 284,\n 940,\n 871,\n 737,\n 670,\n 598,\n 534,\n 514,\n 515,\n 479,\n 479,\n 547,\n 544,\n 521,\n 486,\n 555,\n 514,\n 494,\n 468,\n 552,\n 585,\n 547,\n 587,\n 661,\n 1034,\n 1130,\n 1010,\n 1151,\n 2784,\n 4809,\n 3514,\n 2959,\n 1925,\n 2142,\n 2600,\n 2583,\n 2605,\n 2367,\n 2500,\n 2796,\n 2104,\n 1517,\n 1933,\n 2578,\n 2609,\n 3404,\n 3134,\n 2639,\n 3031,\n 4637,\n 3268,\n 2263,\n 2697,\n 2436,\n 1567,\n 1372,\n 1245,\n 1054,\n 1035,\n 1209,\n 1742,\n 1198,\n 1421,\n 1526,\n 802,\n 686,\n 623,\n 657,\n 727,\n 819,\n 944,\n 1743,\n 1988,\n 1349,\n 1017,\n 780,\n 288,\n 229,\n 201,\n 251,\n 235,\n 222,\n 200,\n 247,\n 235,\n 217,\n 223,\n 217,\n 215,\n 225,\n 210,\n 
203,\n 213,\n 209,\n 217,\n 221,\n 224,\n 212,\n 190,\n 237,\n 231,\n 202,\n 224,\n 205,\n 223,\n 234,\n 229,\n 215,\n 234,\n 232,\n 224,\n 213,\n 220,\n 228,\n 225,\n 214,\n 254,\n 230,\n 232,\n 491,\n 1004,\n 839,\n 693,\n 622,\n 615,\n 560,\n 532,\n 552,\n 555,\n 546,\n 532,\n 580,\n 549,\n 549,\n 569,\n 585,\n 592,\n 544,\n 593,\n 727,\n 637,\n 707,\n 866,\n 1179,\n 1329,\n 1048,\n 1125,\n 3094,\n 4956,\n 4631,\n 4141,\n 2946,\n 1990,\n 1628,\n 1626,\n 1735,\n 1626,\n 1632,\n 1847,\n 2471,\n 1526,\n 1122,\n 1707,\n 2615,\n 3261,\n 2737,\n 2672,\n 2120,\n 3447,\n 4503,\n 2815,\n 2595,\n 2755,\n 1729,\n 1325,\n 1195,\n 1119,\n 1177,\n 1132,\n 1249,\n 1849,\n 1154,\n 1467,\n 1697,\n 920,\n 746,\n 673,\n 609,\n 686,\n 759,\n 926,\n 1755,\n 2218,\n 1300,\n 974,\n 666,\n 257,\n 230,\n 216,\n 217,\n 249,\n 216,\n 211,\n 221,\n 221,\n 233,\n 236,\n 221,\n 212,\n 193,\n 221,\n 217,\n 235,\n 208,\n 224,\n 217,\n 202,\n 222,\n 207,\n 231,\n 218,\n 238,\n 213,\n 216,\n 195,\n 205,\n 225,\n 220,\n 235,\n 249,\n 192,\n 197,\n 255,\n 233,\n 189,\n 292,\n 258,\n 303,\n 978,\n 881,\n 807,\n 702,\n 646,\n 607,\n 664,\n 560,\n 628,\n 585,\n 615,\n 625,\n 655,\n 681,\n 679,\n 709,\n 708,\n 630,\n 602,\n 588,\n 643,\n 646,\n 853,\n 1203,\n 1397,\n 1216,\n 1000,\n 1657,\n 4989,\n 6273,\n 3374,\n 1905,\n 1548,\n 1392,\n 1201,\n 1079,\n 885,\n 802,\n 855,\n 1025,\n 1845,\n 2307,\n 1003,\n 1590,\n 2845,\n 3643,\n 2330,\n 2950,\n 1724,\n 2525,\n 4314,\n 2923,\n 2252,\n 2553,\n 1800,\n 1420,\n 1282,\n 1196,\n 1200,\n 1245,\n 1302,\n 1873,\n 1739,\n 1228,\n 1938,\n 1400,\n 784,\n 611,\n 605,\n 594,\n 632,\n 761,\n 1116,\n 2242,\n 1690,\n 961,\n 764,\n 347,\n 202,\n 218,\n 218,\n 215,\n 236,\n 227,\n 203,\n 233,\n 204,\n 205,\n 205,\n 235,\n 222,\n 209,\n 199,\n 204,\n 219,\n 203,\n 232,\n 241,\n 244,\n 212,\n 210,\n 219,\n 200,\n 234,\n 209,\n 218,\n 213,\n 221,\n 236,\n 212,\n 189,\n 219,\n 221,\n 223,\n 222,\n 212,\n 251,\n 263,\n 417,\n 1038,\n 879,\n 855,\n 716,\n 645,\n 665,\n 715,\n 
673,\n 724,\n 646,\n 681,\n 697,\n 745,\n 816,\n 787,\n 800,\n 826,\n 790,\n 764,\n 724,\n 730,\n 764,\n 833,\n 1188,\n 1416,\n 1302,\n 1253,\n 3056,\n 4154,\n 3588,\n 1802,\n 913,\n 763,\n 823,\n 897,\n 740,\n 526,\n 557,\n 560,\n 509,\n 892,\n 2070,\n 1849,\n 1378,\n 1991,\n 3485,\n 2827,\n 2719,\n 1616,\n 1864,\n 3178,\n 3663,\n 2294,\n 2284,\n 2190,\n 1602,\n 1433,\n 1394,\n 1348,\n 1321,\n 1293,\n 1435,\n 1784,\n 1543,\n 1257,\n 1880,\n 1182,\n 698,\n 637,\n 614,\n 579,\n 648,\n 738,\n 1231,\n 2202,\n 1340,\n 879,\n 586,\n 259,\n 214,\n 225,\n 211,\n 204,\n 209,\n 217,\n 208,\n 201,\n 204,\n 231,\n 236,\n 193,\n 194,\n 229,\n 209,\n 217,\n 218,\n 214,\n 205,\n 221,\n 204,\n 206,\n 241,\n 213,\n 233,\n 214,\n 195,\n 216,\n 177,\n 216,\n 226,\n 231,\n 198,\n 237,\n 210,\n 203,\n 232,\n 252,\n 437,\n 1066,\n 984,\n 874,\n 819,\n 796,\n 775,\n 704,\n 720,\n 684,\n 678,\n 675,\n 716,\n 780,\n 882,\n 1037,\n 1166,\n 1061,\n 956,\n 834,\n 852,\n 796,\n 714,\n 813,\n 964,\n 1271,\n 1342,\n 1308,\n 3308,\n 3089,\n 2795,\n 2717,\n 940,\n 649,\n 673,\n 710,\n 622,\n 545,\n 696,\n 1356,\n 1595,\n 1067,\n 893,\n 1902,\n 2049,\n 1837,\n 1793,\n 2282,\n 3222,\n 1904,\n 1288,\n 1419,\n 2752,\n 3880,\n 2383,\n 2177,\n 1988,\n 1736,\n 1604,\n 1570,\n 1655,\n 1401,\n 1297,\n 1394,\n 1480,\n 1790,\n 1206,\n 1528,\n 1585,\n 749,\n 699,\n 599,\n 568,\n 558,\n 627,\n 852,\n 1793,\n 1497,\n 831,\n 689,\n 338,\n 210,\n 230,\n 234,\n 199,\n 224,\n 225,\n 223,\n 228,\n 209,\n 235,\n 201,\n 243,\n 192,\n 231,\n 222,\n 210,\n 217,\n 223,\n 240,\n 228,\n 233,\n 237,\n 204,\n 221,\n 221,\n 227,\n 209,\n 228,\n 224,\n 229,\n 204,\n 228,\n 236,\n 219,\n 212,\n 226,\n 264,\n 283,\n 838,\n 1252,\n 1050,\n 963,\n 952,\n 1030,\n 1064,\n 982,\n 715,\n 703,\n 660,\n 699,\n 750,\n 881,\n 1301,\n 1709,\n 1585,\n 1462,\n 1267,\n 1143,\n 1148,\n 1115,\n 942,\n 919,\n 983,\n 1109,\n 1075,\n 1597,\n 3990,\n 1878,\n 2751,\n 2907,\n 1155,\n 954,\n 1038,\n 965,\n 1129,\n 1086,\n 999,\n 1267,\n 1347,\n 
1869,\n 1553,\n 1568,\n 2532,\n 1893,\n 1371,\n 1804,\n 2999,\n 2191,\n 1424,\n 1269,\n 2102,\n 4319,\n 2880,\n 2163,\n 2065,\n 1897,\n 1639,\n 1674,\n 1543,\n 1420,\n 1352,\n 1311,\n 1252,\n 1520,\n 1517,\n 1166,\n 1903,\n 1266,\n 825,\n 674,\n 570,\n 494,\n 501,\n 641,\n 944,\n 1860,\n 1148,\n 755,\n 579,\n 280,\n 208,\n 226,\n 217,\n 226,\n 236,\n 192,\n 221,\n 216,\n 232,\n 201,\n 234,\n 208,\n 208,\n 232,\n 237,\n 187,\n 205,\n 237,\n 252,\n 218,\n 235,\n 226,\n 195,\n 215,\n 208,\n 211,\n 236,\n 230,\n 225,\n 222,\n 204,\n 227,\n 196,\n 223,\n 215,\n 236,\n 322,\n 1380,\n 1748,\n 1498,\n 1251,\n 1366,\n 2209,\n 2589,\n 2143,\n 1253,\n 706,\n 705,\n 683,\n 869,\n 1266,\n 1945,\n 2237,\n 1941,\n 1666,\n 1512,\n 1319,\n 1259,\n 1144,\n 1095,\n 1120,\n 1098,\n 955,\n 903,\n 1813,\n 3516,\n 1637,\n 2592,\n 3963,\n 2050,\n 1636,\n 1524,\n 1335,\n 1115,\n 1023,\n 989,\n 1121,\n 1202,\n 1394,\n 1827,\n 1389,\n 2502,\n 1957,\n 1924,\n 2067,\n 1716,\n 2558,\n 2011,\n 1675,\n 1669,\n 3108,\n 4503,\n 2474,\n 2130,\n 2018,\n 1812,\n 1644,\n 1497,\n 1502,\n 1277,\n 1311,\n 1298,\n 1267,\n 1506,\n 1601,\n 1416,\n 1955,\n 1277,\n 995,\n 795,\n 657,\n 535,\n 556,\n 641,\n 1063,\n 1805,\n 905,\n 716,\n 414,\n 263,\n 216,\n 217,\n 223,\n 228,\n 222,\n 216,\n 236,\n 221,\n 203,\n 229,\n 206,\n 198,\n 226,\n 207,\n 218,\n 226,\n 234,\n 216,\n 195,\n 227,\n 225,\n 220,\n 219,\n 204,\n 209,\n 221,\n 255,\n 218,\n 226,\n 236,\n 210,\n 235,\n 209,\n 196,\n 237,\n 352,\n 1288,\n 1373,\n 928,\n 633,\n 559,\n 934,\n 1985,\n 1672,\n 1543,\n 1224,\n 798,\n 824,\n 1079,\n 1878,\n 2720,\n 2780,\n 2169,\n 1917,\n 2156,\n 2103,\n 1980,\n 1558,\n 1402,\n 1253,\n 1168,\n 1089,\n 963,\n 1975,\n 3143,\n 1720,\n 2017,\n 4870,\n 3508,\n 1916,\n 1379,\n 1301,\n 1142,\n 1084,\n 918,\n 958,\n 1111,\n 1256,\n 1492,\n 1393,\n 2157,\n 3115,\n 2908,\n 1222,\n 703,\n 1779,\n 2203,\n 1991,\n 1868,\n 2574,\n 4668,\n 3746,\n 2452,\n 2246,\n 1942,\n 1829,\n 1510,\n 1438,\n 1356,\n 1372,\n 1427,\n 1215,\n 
1284,\n 1547,\n 1657,\n 1981,\n 2400,\n 1766,\n 1442,\n 1195,\n 779,\n 634,\n 651,\n 754,\n 1554,\n 1478,\n 730,\n 658,\n 315,\n 252,\n 219,\n 195,\n 204,\n 205,\n 197,\n 223,\n 213,\n 229,\n 197,\n 190,\n 194,\n 240,\n 202,\n 255,\n 230,\n 224,\n 229,\n 230,\n 221,\n 236,\n 206,\n 202,\n 211,\n 207,\n 215,\n 208,\n 226,\n 216,\n 222,\n 229,\n 208,\n 230,\n 213,\n 224,\n 250,\n 433,\n 338,\n 259,\n 267,\n 291,\n 649,\n 2525,\n 1419,\n 1185,\n 1225,\n 1146,\n 913,\n 1449,\n 2915,\n 3547,\n 3409,\n 3730,\n 3637,\n 2329,\n 1649,\n 1855,\n 1935,\n 2088,\n 2306,\n 2458,\n 2358,\n 1960,\n 2881,\n 3031,\n 2678,\n 2718,\n 3862,\n 4795,\n 2467,\n 1421,\n 1199,\n 1244,\n 1327,\n 1074,\n 1155,\n 1199,\n 1458,\n 1907,\n 1903,\n 2740,\n 3721,\n 1584,\n 786,\n 612,\n 1192,\n 1997,\n 1786,\n 1746,\n 2269,\n 4140,\n 4214,\n 3105,\n 2854,\n 2515,\n 2120,\n 1865,\n 1588,\n 1582,\n 1526,\n 1571,\n 1593,\n 1524,\n 1612,\n 1828,\n 2235,\n 2468,\n 2593,\n 2594,\n 2302,\n 1515,\n 787,\n 705,\n 785,\n 1209,\n 1546,\n 801,\n 735,\n 482,\n 223,\n 251,\n 204,\n 245,\n 219,\n 243,\n 226,\n 213,\n 211,\n 222,\n 231,\n 220,\n 199,\n 213,\n 226,\n 216,\n 210,\n 220,\n 210,\n 215,\n 231,\n 203,\n 233,\n 214,\n 241,\n 207,\n 188,\n 211,\n 222,\n 224,\n 218,\n 210,\n 194,\n 211,\n 202,\n 225,\n 289,\n 236,\n 253,\n 218,\n 233,\n 1139,\n 2871,\n 1595,\n 1128,\n 1018,\n 1294,\n 1484,\n 2135,\n 4496,\n 6025,\n 4774,\n 4392,\n 3098,\n 2456,\n 2345,\n 2255,\n 2168,\n 2290,\n 2450,\n 2474,\n 2790,\n 2917,\n 3549,\n 4803,\n 5838,\n 4569,\n 3927,\n 3821,\n 2823,\n 2121,\n 1902,\n 1742,\n 1806,\n 1685,\n 1707,\n 1845,\n 2340,\n 3117,\n 2686,\n 2078,\n 2837,\n 1057,\n 619,\n 420,\n 638,\n 2019,\n 1747,\n 1544,\n 1750,\n 3106,\n 4248,\n 3303,\n 3194,\n 3154,\n 2825,\n 2277,\n 2066,\n 1952,\n 1811,\n 1987,\n 1999,\n 1677,\n 1696,\n 1848,\n 1853,\n 1960,\n 2114,\n 2483,\n 2943,\n 2705,\n 1764,\n 1061,\n 908,\n 1082,\n 1537,\n 1163,\n 649,\n 679,\n 358,\n 235,\n 244,\n 233,\n 239,\n 206,\n 208,\n 215,\n 215,\n 
196,\n 213,\n 232,\n 208,\n 223,\n 233,\n 216,\n 212,\n 228,\n 210,\n 195,\n 223,\n 232,\n 225,\n 205,\n 228,\n 221,\n 229,\n 246,\n 219,\n 225,\n 229,\n 203,\n 203,\n 197,\n 201,\n 203,\n 209,\n 277,\n 234,\n 239,\n 248,\n 975,\n 2242,\n 2729,\n 2188,\n 1595,\n 1572,\n 2509,\n 3981,\n 4778,\n 4213,\n 3530,\n 2412,\n 2033,\n 2055,\n 1924,\n 1862,\n 1807,\n 1894,\n 2212,\n 2416,\n 2525,\n 2441,\n 2294,\n 3710,\n 6523,\n 7018,\n 6481,\n 5525,\n 3551,\n 2489,\n 2142,\n 1942,\n 2121,\n 2063,\n 1852,\n 1877,\n 2157,\n 2226,\n 2020,\n 1777,\n 2370,\n 902,\n 515,\n 465,\n 525,\n 1289,\n 2873,\n 1882,\n 1438,\n 1855,\n 3844,\n 4041,\n 3723,\n 3204,\n 3397,\n 2863,\n 2422,\n 2284,\n 2372,\n 2245,\n 2150,\n 1907,\n 1941,\n 1509,\n 1490,\n 1555,\n 1669,\n 1871,\n 2375,\n 2437,\n 2373,\n 2186,\n 1345,\n 1075,\n 1247,\n 1580,\n 1088,\n 704,\n 665,\n 409,\n 242,\n 227,\n 227,\n 205,\n 193,\n 223,\n 214,\n 231,\n 205,\n 192,\n 226,\n 218,\n 198,\n 226,\n 226,\n 241,\n 190,\n 220,\n 246,\n 185,\n 221,\n 212,\n 217,\n 224,\n 219,\n 187,\n 215,\n 202,\n 238,\n 199,\n 214,\n 265,\n 219,\n 209,\n 230,\n 253,\n 277,\n 275,\n 267,\n 407,\n 767,\n 627,\n 503,\n 1054,\n 2559,\n 3336,\n 2960,\n 2828,\n 2437,\n 1849,\n 1582,\n 1497,\n 1462,\n 1769,\n 2961,\n 4571,\n 5120,\n 4315,\n 3377,\n 3079,\n 2835,\n 3185,\n 4327,\n 7108,\n 6402,\n 5888,\n 5130,\n 4557,\n 4256,\n 3686,\n 2941,\n 2840,\n 2442,\n 1849,\n 1896,\n 2178,\n 2397,\n 2654,\n 2620,\n 1337,\n 857,\n 820,\n 987,\n 1305,\n 1739,\n 2370,\n 3422,\n 2339,\n 2383,\n 3467,\n 5720,\n 5340,\n 3559,\n 3305,\n 3545,\n 2859,\n 2549,\n 2417,\n 2226,\n 2137,\n 1936,\n 1686,\n 1462,\n 1540,\n 1467,\n 1270,\n 1561,\n 1804,\n 1934,\n 1983,\n 1983,\n 1813,\n 1404,\n 1356,\n 1696,\n 1572,\n 858,\n 631,\n 640,\n 329,\n 224,\n 225,\n 190,\n 228,\n 195,\n 193,\n 223,\n 193,\n 219,\n 217,\n 242,\n 214,\n 225,\n 232,\n 206,\n 212,\n 226,\n 180,\n 194,\n 229,\n 214,\n 192,\n 211,\n 230,\n 235,\n 238,\n 228,\n 213,\n 198,\n 241,\n 229,\n 208,\n 208,\n 
233,\n 227,\n 220,\n 236,\n 238,\n 247,\n 314,\n 545,\n 1817,\n 2891,\n 2454,\n 1997,\n 1751,\n 1519,\n 1296,\n 1145,\n 1437,\n 2387,\n 4185,\n 4736,\n 3841,\n 2757,\n 2362,\n 2144,\n 1857,\n 1699,\n 1598,\n 1643,\n 3083,\n 5853,\n 5569,\n 5554,\n 5012,\n 3951,\n 3473,\n 3471,\n 3618,\n 3433,\n 3261,\n 3307,\n 3543,\n 4070,\n 4977,\n 3335,\n 2798,\n 2950,\n 2959,\n 2723,\n 2575,\n 2667,\n 2843,\n 3204,\n 3848,\n 4277,\n 3498,\n 3563,\n 4559,\n 4808,\n 3625,\n 3329,\n 3649,\n 2887,\n 2397,\n 2230,\n 2103,\n 1917,\n 1727,\n 1526,\n 1431,\n 1424,\n 1271,\n 1284,\n 1280,\n 1467,\n 1658,\n 1652,\n 1569,\n 1639,\n 1396,\n 1274,\n 1610,\n 1687,\n 984,\n 670,\n 685,\n 405,\n 247,\n 181,\n 196,\n 232,\n 214,\n 240,\n 208,\n 185,\n 223,\n 222,\n 214,\n 238,\n 201,\n 232,\n 222,\n 196,\n 208,\n 209,\n 198,\n 244,\n 226,\n 204,\n 231,\n 247,\n 212,\n 227,\n 230,\n 229,\n 187,\n 227,\n 229,\n 231,\n 224,\n 242,\n 209,\n 256,\n 214,\n 240,\n 291,\n 383,\n 1502,\n 2816,\n 2231,\n 1766,\n 1497,\n 1174,\n 1143,\n 1258,\n 1711,\n 3424,\n 4380,\n 3914,\n 3003,\n 2309,\n 1937,\n 1806,\n 1709,\n 1588,\n 1473,\n 1398,\n 1403,\n 1651,\n 3705,\n 4939,\n 4803,\n 3907,\n 3888,\n 3225,\n 2552,\n 2715,\n 2735,\n 2649,\n 2633,\n 2808,\n 3316,\n 3492,\n 1945,\n 1520,\n 1509,\n 1734,\n 1723,\n 1671,\n 1651,\n 1760,\n 1848,\n 2042,\n 2312,\n 2998,\n 3127,\n 3017,\n 4019,\n 3997,\n 3755,\n 3867,\n 4107,\n 2958,\n 2247,\n 2105,\n 1962,\n 1732,\n 1543,\n 1435,\n 1379,\n 1233,\n 1222,\n 1211,\n 1282,\n 1467,\n 1520,\n 1537,\n 1595,\n 1233,\n 1124,\n 1225,\n 1519,\n 1247,\n 798,\n 688,\n 761,\n 324,\n 213,\n 210,\n 206,\n 177,\n 225,\n 216,\n 199,\n 213,\n 218,\n 221,\n 219,\n 199,\n 232,\n 206,\n 223,\n 239,\n 207,\n 232,\n 212,\n 205,\n 213,\n 227,\n 223,\n 229,\n 221,\n 219,\n 216,\n 218,\n 209,\n 230,\n 200,\n 232,\n 211,\n 213,\n 206,\n 238,\n 219,\n 365,\n 1456,\n 2888,\n 2261,\n 1764,\n 1337,\n 1094,\n 1137,\n 1430,\n 2320,\n 3658,\n 3622,\n 2751,\n 2425,\n 2116,\n 2003,\n 1754,\n 1587,\n 
1674,\n 1567,\n 1470,\n 1402,\n 1314,\n 1467,\n 3151,\n 5305,\n 5248,\n 3957,\n 3316,\n 2852,\n 2380,\n 2334,\n 2415,\n 2556,\n 2781,\n 2848,\n 3458,\n 2393,\n 1281,\n 1153,\n 1244,\n 1566,\n 1869,\n 2113,\n 2000,\n 1612,\n 1447,\n 1476,\n 1581,\n 1904,\n 2802,\n 3343,\n 3672,\n 3876,\n 3899,\n 4206,\n 4848,\n 3861,\n 2578,\n 2010,\n 1799,\n 1653,\n 1509,\n 1321,\n 1367,\n 1347,\n 1343,\n 1237,\n 1201,\n 1176,\n 1210,\n 1516,\n 1847,\n 1398,\n 1018,\n 1034,\n 1349,\n 1343,\n 888,\n 719,\n 728,\n 549,\n 255,\n 194,\n 218,\n 210,\n 209,\n 229,\n 217,\n 200,\n 216,\n 254,\n 221,\n 222,\n 220,\n 211,\n 246,\n 210,\n 230,\n 214,\n 221,\n 228,\n 213,\n 211,\n 214,\n 222,\n 224,\n 239,\n 241,\n 211,\n 201,\n 209,\n 215,\n 212,\n 218,\n 218,\n 231,\n 222,\n 330,\n 1429,\n 2884,\n 2193,\n 1499,\n 1142,\n 1096,\n 1274,\n 1510,\n 2766,\n 3224,\n 3109,\n 2477,\n 2066,\n 1951,\n 1866,\n 1685,\n 1558,\n 1607,\n 1646,\n 1698,\n 1559,\n 1533,\n 1543,\n 1926,\n 3963,\n 4471,\n 4848,\n 5008,\n 4188,\n 2957,\n 2665,\n 2784,\n 2628,\n 2588,\n 2576,\n 3064,\n 3635,\n 2423,\n 2219,\n 2348,\n 2382,\n 2454,\n 2813,\n 3086,\n 3104,\n 3080,\n 3014,\n 2628,\n 2069,\n 1952,\n 2825,\n 3985,\n 3752,\n 2837,\n 3172,\n 4240,\n 4175,\n 2977,\n 2517,\n 2180,\n 1721,\n 1600,\n 1305,\n 1237,\n 1192,\n 1199,\n 1315,\n 1271,\n 1098,\n 1100,\n 1100,\n 1297,\n 1680,\n 1680,\n 1194,\n 939,\n 1106,\n 1125,\n 843,\n 716,\n 739,\n 762,\n 308,\n 226,\n 209,\n 209,\n 220,\n 234,\n 209,\n 210,\n 191,\n 229,\n 201,\n 233,\n 206,\n 217,\n 220,\n 215,\n 250,\n 236,\n 221,\n 207,\n 215,\n 214,\n 232,\n 209,\n 201,\n 222,\n 176,\n 251,\n 189,\n 212,\n 232,\n 189,\n 230,\n 211,\n 256,\n 295,\n 1342,\n 2825,\n 2070,\n 1471,\n 1116,\n 1183,\n 1373,\n 1681,\n 2622,\n 3181,\n 2615,\n 2202,\n 2022,\n 1828,\n 1826,\n 1928,\n 1811,\n 1588,\n 1662,\n 1901,\n 1905,\n 1657,\n 1994,\n 1987,\n 2912,\n 4157,\n 5007,\n 5041,\n 5083,\n 4928,\n 3501,\n 2562,\n 2562,\n 2856,\n 3012,\n 3461,\n 4081,\n 3846,\n 3479,\n 3159,\n 3055,\n 
2710,\n 2766,\n 3174,\n 3358,\n 3718,\n 3474,\n 3506,\n 3378,\n 3244,\n 3122,\n 3553,\n 4331,\n 3867,\n 2677,\n 2937,\n 2955,\n 2978,\n 2009,\n 1713,\n 2073,\n 1700,\n 1396,\n 1197,\n 1053,\n 1088,\n 1042,\n 1045,\n 1213,\n 1111,\n 1028,\n 1029,\n 1125,\n 1429,\n 1700,\n 1369,\n 945,\n 1011,\n 1018,\n 755,\n 704,\n 637,\n 736,\n 445,\n 234,\n 224,\n 187,\n 218,\n 226,\n 209,\n 225,\n 210,\n 207,\n 196,\n 231,\n 211,\n 210,\n 205,\n 206,\n 219,\n 211,\n 197,\n 220,\n 239,\n 208,\n 238,\n 200,\n 226,\n 220,\n 220,\n 225,\n 204,\n 224,\n 227,\n 236,\n 228,\n 222,\n 253,\n 1039,\n 2862,\n 2203,\n 1397,\n 1054,\n 1124,\n 1318,\n 1630,\n 2583,\n 2865,\n 2391,\n 2194,\n 1904,\n 1871,\n 1863,\n 1916,\n 1912,\n 1824,\n 1783,\n 1891,\n 1976,\n 2013,\n 2061,\n 2183,\n 2059,\n 4039,\n 6282,\n 7167,\n 7812,\n 7000,\n 5615,\n 5040,\n 3924,\n 3944,\n 4756,\n 4789,\n 4641,\n 4337,\n 3284,\n 2817,\n 2622,\n 2653,\n 2392,\n 2552,\n 2750,\n 2854,\n 3157,\n 3456,\n 3459,\n 3447,\n 3576,\n 4200,\n 4210,\n 3156,\n 3122,\n 3303,\n 2698,\n 1801,\n 2057,\n 2094,\n 1547,\n 1433,\n 1808,\n 1646,\n 1225,\n 1051,\n 992,\n 894,\n 951,\n 1053,\n 998,\n 1027,\n 961,\n 1077,\n 1229,\n 1587,\n 1665,\n 1046,\n 958,\n 929,\n 723,\n 680,\n 634,\n 687,\n 627,\n 263,\n 233,\n 191,\n 190,\n 209,\n 194,\n 212,\n 224,\n 203,\n 242,\n 220,\n 194,\n 220,\n 230,\n 212,\n 222,\n 214,\n 240,\n 206,\n 214,\n 215,\n 209,\n 229,\n 217,\n 213,\n 212,\n 195,\n 223,\n 222,\n 213,\n 219,\n 240,\n 230,\n 920,\n 2820,\n 2048,\n 1358,\n 1015,\n 1064,\n 1346,\n 1657,\n 2500,\n 2784,\n 2334,\n 2127,\n 2197,\n 2016,\n 1820,\n 2013,\n 2034,\n 1872,\n 1799,\n 1875,\n 2099,\n 2081,\n 2154,\n 2341,\n 2268,\n 2737,\n 5447,\n 7007,\n 8974,\n 10475,\n 9249,\n 7819,\n 7125,\n 5101,\n 4361,\n 4174,\n 4414,\n 4385,\n 3568,\n 2856,\n 2733,\n 2792,\n 2629,\n 2471,\n 2574,\n 2615,\n 2744,\n 3238,\n 3637,\n 3802,\n 4674,\n 5449,\n 4875,\n 3840,\n 3793,\n 3896,\n 2865,\n 1699,\n 1685,\n 1813,\n 1916,\n 1609,\n 1416,\n 1520,\n 1665,\n 
1292,\n 940,\n 826,\n 731,\n 763,\n 763,\n 844,\n 926,\n 955,\n 1040,\n 1313,\n 1526,\n 1532,\n 1253,\n 1034,\n 854,\n 689,\n 623,\n 591,\n 583,\n 644,\n 278,\n 220,\n 194,\n 201,\n 230,\n 232,\n 247,\n 214,\n 247,\n 189,\n 227,\n 220,\n 209,\n 225,\n 210,\n 212,\n 216,\n 219,\n 189,\n 199,\n 233,\n 243,\n 224,\n 189,\n 220,\n 211,\n 219,\n 226,\n 214,\n 254,\n 247,\n 226,\n 390,\n 2179,\n 2608,\n 1604,\n 1024,\n 1003,\n 1250,\n 1570,\n 2038,\n 3064,\n 2520,\n 2192,\n 2261,\n 2132,\n 1832,\n 1682,\n 1765,\n 2064,\n 1840,\n 1766,\n 2112,\n 2353,\n 2358,\n 2299,\n 2308,\n 2081,\n 2769,\n 5329,\n 8081,\n 10706,\n 11995,\n 9758,\n 7566,\n 6361,\n 5872,\n 4401,\n 4197,\n 4470,\n 4642,\n 3468,\n 3276,\n 2969,\n 2618,\n 2446,\n 2393,\n 2504,\n 2458,\n 2528,\n 3143,\n 3885,\n 5863,\n 5775,\n 4395,\n 4154,\n 4666,\n 4512,\n 2764,\n 1632,\n 1483,\n 1476,\n 1716,\n 1628,\n 1663,\n 1570,\n 1294,\n 1284,\n 1467,\n 1065,\n 692,\n 654,\n 653,\n 653,\n 680,\n 809,\n 922,\n 1036,\n 1171,\n 1573,\n 1629,\n 1476,\n 1124,\n 803,\n 614,\n 525,\n 584,\n 570,\n 558,\n 491,\n 248,\n 230,\n 209,\n 190,\n 197,\n 207,\n 237,\n 222,\n 207,\n 221,\n 243,\n 222,\n 216,\n 209,\n 208,\n 206,\n 207,\n 201,\n 238,\n 201,\n 230,\n 215,\n 222,\n 232,\n 251,\n 221,\n 227,\n 209,\n 224,\n 239,\n 262,\n 1209,\n 2899,\n 1982,\n 1221,\n 952,\n 1084,\n 1450,\n 1732,\n 2552,\n 2701,\n 2606,\n 2465,\n 2231,\n 1926,\n 1771,\n 1709,\n 1749,\n 2002,\n 1920,\n 1845,\n 2149,\n 2367,\n 2519,\n 2173,\n 2175,\n 1945,\n 2650,\n 4952,\n 8567,\n 9612,\n 9232,\n 8567,\n 8355,\n 7820,\n 6448,\n 4325,\n 4201,\n 4523,\n 4379,\n 3517,\n 3066,\n 3069,\n 3017,\n 2863,\n 2926,\n 2738,\n 2671,\n 2904,\n 3849,\n 5086,\n 5004,\n 3837,\n 4095,\n 4537,\n 3463,\n 2147,\n 1602,\n 1450,\n 1375,\n 1366,\n 1477,\n 1472,\n 1430,\n 1677,\n 1425,\n 1122,\n 1293,\n 1504,\n 983,\n 937,\n 786,\n 690,\n 700,\n 805,\n 793,\n 932,\n 1005,\n 1157,\n 1582,\n 1689,\n 1354,\n 893,\n 646,\n 598,\n 615,\n 583,\n 488,\n 550,\n 382,\n 192,\n 207,\n 
223,\n 202,\n 216,\n 237,\n 218,\n 210,\n 197,\n 195,\n 223,\n 228,\n 205,\n 232,\n 197,\n 221,\n 216,\n 203,\n 235,\n 200,\n 227,\n 223,\n 230,\n 224,\n 231,\n 200,\n 223,\n 249,\n 208,\n 478,\n 2453,\n 2327,\n 1381,\n 983,\n 966,\n 1294,\n 1637,\n 2230,\n 2623,\n 2863,\n 2773,\n 2261,\n 1863,\n 1985,\n 1948,\n 1855,\n 1745,\n 2178,\n 2042,\n 1976,\n 2227,\n 2525,\n 2294,\n 2167,\n 1911,\n 1965,\n 2136,\n 3687,\n 7755,\n 8570,\n 8605,\n 8276,\n 8086,\n 7491,\n 6602,\n 4352,\n 4185,\n 4837,\n 4350,\n 3365,\n 3278,\n 3206,\n 3175,\n 3269,\n 3384,\n 3480,\n 4052,\n 4959,\n 4103,\n 3385,\n 3128,\n 3552,\n 3005,\n 1923,\n 1567,\n 1423,\n 1448,\n 1398,\n 1388,\n 1461,\n 1513,\n 1568,\n 1529,\n 1511,\n 1556,\n 1495,\n 1223,\n 1265,\n 1513,\n 1275,\n 1094,\n 964,\n 959,\n 848,\n 772,\n 712,\n 890,\n 1031,\n 1192,\n 1870,\n 1616,\n 1019,\n 686,\n 620,\n 619,\n 557,\n 470,\n 534,\n 593,\n 292,\n 234,\n 221,\n 244,\n 254,\n 203,\n 231,\n 189,\n 203,\n 232,\n 226,\n 214,\n 212,\n 219,\n 199,\n 207,\n 212,\n 207,\n 217,\n 234,\n 230,\n 226,\n 224,\n 198,\n 234,\n 230,\n 228,\n 213,\n 263,\n 1272,\n 2917,\n 1935,\n 1226,\n 961,\n 1025,\n 1352,\n 1780,\n 2410,\n 2481,\n 2678,\n 2541,\n 2289,\n 2016,\n 2006,\n 2282,\n 2024,\n 1766,\n 2022,\n 1969,\n 2179,\n 2228,\n 2197,\n 2113,\n 2166,\n 1964,\n 1816,\n 1775,\n 2696,\n 5832,\n 8019,\n 7775,\n 7182,\n 7164,\n 8276,\n 7058,\n 4853,\n 4256,\n 4437,\n 3581,\n 3239,\n 3222,\n 3466,\n 3827,\n 4530,\n 5237,\n 5035,\n 4478,\n 3788,\n 3322,\n 3224,\n 2792,\n 1991,\n 1641,\n 1534,\n 1447,\n 1409,\n 1352,\n 1383,\n 1617,\n 1960,\n 1571,\n 1404,\n 1361,\n 1352,\n 1319,\n 1376,\n 1440,\n 1273,\n 1338,\n 1444,\n 1154,\n 1169,\n 1113,\n 1056,\n 975,\n 954,\n 927,\n 1169,\n 1287,\n 2087,\n 1989,\n 1105,\n 753,\n 650,\n 616,\n 509,\n 497,\n 527,\n 626,\n 427,\n 248,\n 218,\n 215,\n 211,\n 204,\n 245,\n 186,\n 198,\n 211,\n 219,\n 222,\n 231,\n 194,\n 201,\n 184,\n 202,\n 209,\n 214,\n 205,\n 209,\n 215,\n 218,\n 233,\n 228,\n 205,\n 224,\n 
225,\n 432,\n 2515,\n 2403,\n 1534,\n 1070,\n 1080,\n 1222,\n 1447,\n 2053,\n 2410,\n 2422,\n 2773,\n 2457,\n 2265,\n 2323,\n 2116,\n 2188,\n 1855,\n 1668,\n 1839,\n 2125,\n 2063,\n 2146,\n 2130,\n 1988,\n 1983,\n 1966,\n 1876,\n 1676,\n 2164,\n 3808,\n 7118,\n 7067,\n 6124,\n 6986,\n 8709,\n 7209,\n 6116,\n 5498,\n 5180,\n 4331,\n 4604,\n 5402,\n 6022,\n 5619,\n 4639,\n 4171,\n 3835,\n 3528,\n 2893,\n 1953,\n 1729,\n 1675,\n 1732,\n 1486,\n 1505,\n 1416,\n 1472,\n 1359,\n 1396,\n 1428,\n 1490,\n 1491,\n 1491,\n 1347,\n 1416,\n 1229,\n 1308,\n 1345,\n 1227,\n 1121,\n 1146,\n 1172,\n 1125,\n 1277,\n 1190,\n 1243,\n 1207,\n 1183,\n 1335,\n 1509,\n 1857,\n 2565,\n 1792,\n 932,\n 752,\n 643,\n 530,\n 505,\n 509,\n 569,\n 637,\n 333,\n 204,\n 198,\n 234,\n 218,\n 212,\n 197,\n 217,\n 208,\n 221,\n 198,\n 234,\n 210,\n 242,\n 205,\n 209,\n 207,\n 206,\n 245,\n 227,\n 231,\n 220,\n 213,\n 188,\n 228,\n 225,\n 284,\n 1346,\n 2971,\n 1908,\n 1196,\n 1092,\n 1187,\n 1338,\n 1680,\n 2365,\n 2352,\n 2644,\n 2665,\n 2481,\n 2428,\n 1982,\n 1964,\n 1918,\n 1781,\n 1689,\n 1814,\n 2164,\n 2048,\n 2105,\n 2163,\n 2208,\n 2025,\n 1856,\n 1908,\n 1790,\n 1554,\n 2881,\n 5544,\n 8515,\n 8861,\n 8717,\n 8855,\n 8270,\n 8172,\n 8366,\n 9140,\n 7522,\n 5985,\n 5397,\n 4993,\n 4533,\n 3817,\n 2688,\n 1822,\n 1430,\n 1294,\n 1216,\n 1250,\n 1406,\n 1729,\n 1827,\n 1484,\n 1337,\n 1389,\n 1451,\n 1284,\n 1397,\n 1521,\n 1444,\n 1413,\n 1452,\n 1418,\n 1238,\n 1273,\n 1307,\n 1171,\n 1235,\n 1123,\n 1044,\n 999,\n 970,\n 1072,\n 1221,\n 1246,\n 1260,\n 1271,\n 1736,\n 1945,\n 2726,\n 2614,\n 1462,\n 824,\n 651,\n 538,\n 554,\n 516,\n 530,\n 732,\n 506,\n 223,\n 267,\n 196,\n 218,\n 225,\n 197,\n 222,\n 232,\n 222,\n 210,\n 212,\n 207,\n 218,\n 215,\n 218,\n 206,\n 216,\n 190,\n 223,\n 213,\n 221,\n 190,\n 210,\n 237,\n 235,\n 821,\n 2985,\n 2268,\n 1351,\n 1062,\n 1180,\n 1367,\n 1646,\n 1968,\n 2554,\n 2678,\n 3053,\n 2815,\n 2488,\n 2050,\n 1915,\n 1861,\n 1888,\n 1689,\n 1699,\n 1840,\n 
2228,\n 2173,\n 2317,\n 2319,\n 2121,\n 2159,\n 2045,\n 1837,\n 1632,\n 1705,\n 2294,\n 4278,\n 7660,\n 10936,\n 10220,\n 9090,\n 9282,\n 8211,\n 7292,\n 6240,\n 3900,\n 2891,\n 2397,\n 2112,\n 1887,\n 1625,\n 1453,\n 1261,\n 1212,\n 1122,\n 1086,\n 1178,\n 1506,\n 1490,\n 1569,\n 1379,\n 1349,\n 1346,\n 1317,\n 1233,\n 1354,\n 1522,\n 1454,\n 1587,\n 1635,\n 1411,\n 1464,\n 1338,\n 1252,\n 1210,\n 1148,\n 1007,\n 846,\n 920,\n 909,\n 968,\n 1121,\n 1076,\n 1044,\n 1455,\n 1865,\n 1952,\n 2777,\n 2257,\n 1401,\n 664,\n 553,\n 534,\n 534,\n 508,\n 557,\n 674,\n 292,\n 201,\n 217,\n 188,\n 228,\n 197,\n 218,\n 218,\n 199,\n 240,\n 232,\n 231,\n 196,\n 198,\n 220,\n 233,\n 224,\n 212,\n 201,\n 218,\n 194,\n 225,\n 239,\n 222,\n 227,\n 225,\n 983,\n 3085,\n 2177,\n 1292,\n 1091,\n 1236,\n 1477,\n 1594,\n 1995,\n 2765,\n 2772,\n 3034,\n 2820,\n 2376,\n 2084,\n 2004,\n 1870,\n 1982,\n 1921,\n 1726,\n 1759,\n 1974,\n 2147,\n 2191,\n 2240,\n 2143,\n 2111,\n 1932,\n 1859,\n 1856,\n 1657,\n 1922,\n 2477,\n 2860,\n 3021,\n 3477,\n 3573,\n 3672,\n 3672,\n 3690,\n 4498,\n 2533,\n 1713,\n 1600,\n 1359,\n 1356,\n 1432,\n 1302,\n 1110,\n 1090,\n 1033,\n 989,\n 1146,\n 1389,\n 1381,\n 1489,\n 1555,\n 1403,\n 1400,\n 1229,\n 1165,\n 1158,\n 1498,\n 1539,\n 1458,\n 1621,\n 1375,\n 1523,\n 1475,\n 1265,\n 1269,\n 1158,\n 1085,\n 892,\n 866,\n 882,\n 862,\n 906,\n 908,\n 1006,\n 1450,\n 1824,\n 1758,\n 2145,\n 2117,\n 1369,\n 829,\n 582,\n 501,\n 540,\n 532,\n 552,\n 445,\n 262,\n 200,\n 251,\n 210,\n 211,\n 184,\n 211,\n 191,\n 210,\n 198,\n 229,\n 229,\n 209,\n 220,\n 241,\n 231,\n 213,\n 234,\n 223,\n 190,\n 195,\n 233,\n 204,\n 224,\n 221,\n 804,\n 2985,\n 2262,\n 1421,\n 1090,\n 1191,\n 1549,\n 1776,\n 1959,\n 2713,\n 2706,\n 2844,\n 2648,\n 2239,\n 2173,\n 2104,\n 2069,\n 1827,\n 1635,\n 1686,\n 1627,\n 1655,\n 2055,\n 2037,\n 2009,\n 2218,\n 1938,\n 1979,\n 1891,\n 1670,\n 1512,\n 2152,\n 2821,\n 2804,\n 2568,\n 2434,\n 2202,\n 2466,\n 2692,\n 3059,\n 4047,\n 2339,\n 1541,\n 
1518,\n 1282,\n 1140,\n 1247,\n 1225,\n 1156,\n 1038,\n 1005,\n 950,\n 1163,\n 1246,\n 1382,\n 1389,\n 1406,\n 1424,\n 1274,\n 1170,\n 1198,\n 1403,\n 1426,\n 1377,\n 1423,\n 1387,\n 1408,\n 1542,\n 1287,\n 1187,\n 1058,\n 1118,\n 1165,\n 956,\n 862,\n 885,\n 899,\n 955,\n 1072,\n 1238,\n 1437,\n 1618,\n 1629,\n 1666,\n 1512,\n 1144,\n 785,\n 592,\n 534,\n 541,\n 480,\n 530,\n 383,\n 228,\n 230,\n 202,\n 219,\n 204,\n 231,\n 218,\n 188,\n 216,\n 205,\n 222,\n 222,\n 217,\n 177,\n 216,\n 226,\n 197,\n 207,\n 195,\n 209,\n 220,\n 202,\n 217,\n 222,\n 241,\n 240,\n 482,\n 2780,\n 2431,\n 1643,\n 1207,\n 1186,\n 1577,\n 1768,\n 1807,\n 2428,\n 2754,\n 2766,\n 2715,\n 2321,\n 2181,\n 1970,\n 2042,\n 1916,\n 1595,\n 1669,\n 1591,\n 1551,\n 1405,\n 1618,\n 1875,\n 1852,\n 2007,\n 1949,\n 1903,\n 1891,\n 1944,\n 1992,\n 2800,\n 3155,\n 3010,\n 2675,\n 2640,\n 2296,\n 2265,\n 2532,\n 2840,\n 3910,\n 2440,\n 1669,\n 1350,\n 1218,\n 1220,\n 1107,\n 1173,\n 1143,\n 1090,\n 1037,\n 972,\n 1231,\n 1339,\n 1218,\n 1424,\n 1579,\n 1375,\n 1260,\n 1240,\n 1181,\n 1254,\n 1417,\n 1465,\n 1354,\n 1468,\n 1356,\n 1434,\n 1427,\n 1417,\n 1132,\n 1049,\n 1168,\n 1261,\n 1077,\n 859,\n 983,\n 1068,\n 1351,\n 1241,\n 1281,\n 1310,\n 1325,\n 1415,\n 1400,\n 1324,\n 1123,\n 820,\n 729,\n 590,\n 518,\n 520,\n 532,\n 417,\n 310,\n 232,\n 193,\n 207,\n 198,\n 199,\n 218,\n 202,\n 195,\n 216,\n 205,\n 218,\n 195,\n 236,\n 193,\n 194,\n 198,\n 203,\n 210,\n 222,\n 214,\n 238,\n 230,\n 341,\n 2103,\n 2645,\n 1660,\n 1264,\n 1195,\n 1356,\n 1587,\n 1826,\n 2091,\n 2861,\n 2962,\n 2824,\n 2310,\n 2234,\n 2014,\n 1915,\n 1730,\n 1669,\n 1590,\n 1614,\n 1567,\n 1635,\n 1491,\n 1437,\n 1485,\n 1624,\n 1640,\n 1771,\n 1823,\n 1552,\n 1646,\n 2569,\n 4118,\n 3228,\n 2924,\n 2638,\n 2525,\n 2385,\n 2374,\n 2865,\n 3498,\n 4410,\n 1952,\n 1534,\n 1299,\n 1312,\n 1296,\n 1208,\n 1092,\n 1099,\n 1056,\n 1139,\n 1194,\n 1378,\n 1236,\n 1232,\n 1367,\n 1280,\n 1212,\n 1189,\n 1306,\n 1290,\n 1275,\n 1336,\n 
1445,\n 1424,\n 1505,\n 1120,\n 1160,\n 1288,\n 1266,\n 1139,\n 994,\n 1048,\n 1230,\n 1110,\n 1025,\n 1102,\n 1221,\n 1446,\n 1289,\n 1165,\n 1142,\n 1182,\n 1271,\n 1306,\n 1237,\n 1041,\n 868,\n 688,\n 598,\n 511,\n 436,\n 459,\n 487,\n 600,\n 292,\n 228,\n 214,\n 232,\n 230,\n 205,\n 230,\n 209,\n 236,\n 200,\n 210,\n 213,\n 213,\n 217,\n 194,\n 215,\n 203,\n 202,\n 224,\n 192,\n 204,\n 233,\n 681,\n 2931,\n 2127,\n 1552,\n 1207,\n 1351,\n 1464,\n 1726,\n 1869,\n 2291,\n 3003,\n 3057,\n 2435,\n 2060,\n 2011,\n 1966,\n 1851,\n 1606,\n 1434,\n 1569,\n 1587,\n 1465,\n 1534,\n 1413,\n 1386,\n 1374,\n 1310,\n 1326,\n 1484,\n 1444,\n 1395,\n 1577,\n 3086,\n 4235,\n 3241,\n 2840,\n 2843,\n 2813,\n 2441,\n 2370,\n 2962,\n 3919,\n 4866,\n 2290,\n 1701,\n 1641,\n 1510,\n 1315,\n 1205,\n 1122,\n 1138,\n 1256,\n 1205,\n 1228,\n 1273,\n 1270,\n 1402,\n 1299,\n 1236,\n 1257,\n 1264,\n 1228,\n 1238,\n 1209,\n 1397,\n 1355,\n 1377,\n 1310,\n 1220,\n 1238,\n 1185,\n 1155,\n 1201,\n 1095,\n 1041,\n 1108,\n 1172,\n 1140,\n 1144,\n 1175,\n 1327,\n 1249,\n 1208,\n 1286,\n 1237,\n 1162,\n 1270,\n 1154,\n 990,\n 925,\n 841,\n 678,\n 598,\n 473,\n 459,\n 506,\n 620,\n 588,\n 264,\n 219,\n 212,\n 214,\n 213,\n 216,\n 221,\n 208,\n 192,\n 204,\n 236,\n 230,\n 202,\n 196,\n 204,\n 236,\n 223,\n 227,\n 238,\n 229,\n 318,\n 2155,\n 2840,\n 1770,\n 1506,\n 1318,\n 1388,\n 1598,\n 1813,\n 1903,\n 2909,\n 2857,\n 2624,\n 2127,\n 2082,\n 2165,\n 1975,\n 1855,\n 1576,\n 1507,\n 1523,\n 1591,\n 1486,\n 1438,\n 1372,\n 1295,\n 1396,\n 1336,\n 1326,\n 1318,\n 1346,\n 1506,\n 1583,\n 4305,\n 4270,\n 3353,\n 3027,\n 2927,\n 2903,\n 2903,\n 2833,\n 3069,\n 3702,\n 5644,\n 3330,\n 2188,\n 1764,\n 1503,\n 1558,\n 1342,\n 1207,\n 1275,\n 1213,\n 1289,\n 1346,\n 1208,\n 1156,\n 1210,\n 1295,\n 1307,\n 1291,\n 1385,\n 1233,\n 1192,\n 1189,\n 1420,\n 1463,\n 1519,\n 1428,\n 1149,\n 1148,\n 1104,\n 1060,\n 1123,\n 1157,\n 1091,\n 1056,\n 1147,\n 1178,\n 1181,\n 1265,\n 1395,\n 1393,\n 1315,\n 1228,\n 
1164,\n 1204,\n 1201,\n 982,\n 963,\n 1039,\n 958,\n 811,\n 629,\n 498,\n 454,\n 513,\n 601,\n 788,\n 399,\n 216,\n 249,\n 222,\n 209,\n 210,\n 202,\n 215,\n 217,\n 214,\n 188,\n 230,\n 189,\n 231,\n 212,\n 180,\n 213,\n 239,\n 232,\n 228,\n 323,\n 2172,\n 2799,\n 1825,\n 1385,\n 1273,\n 1301,\n 1721,\n 1738,\n 1908,\n 2856,\n 2753,\n 2508,\n 2104,\n 2114,\n 2106,\n 2140,\n 1911,\n 1692,\n 1590,\n 1579,\n 1540,\n 1499,\n 1432,\n 1473,\n 1342,\n 1419,\n 1382,\n 1474,\n 1496,\n 1444,\n 1612,\n 1890,\n 4520,\n 4345,\n 3488,\n 3359,\n 3471,\n 3501,\n 3527,\n 3114,\n 3334,\n 3862,\n 4791,\n 5405,\n 2592,\n 1809,\n 1680,\n 1878,\n 1757,\n 1513,\n 1187,\n 1198,\n 1247,\n 1406,\n 1233,\n 1246,\n 1344,\n 1310,\n 1206,\n 1298,\n 1443,\n 1330,\n 1270,\n 1278,\n 1431,\n 1459,\n 1358,\n 1420,\n 1119,\n 1127,\n 940,\n 1010,\n 936,\n 1073,\n 1099,\n 1006,\n 1148,\n 1156,\n 1109,\n 1271,\n 1340,\n 1547,\n 1436,\n 1423,\n 1330,\n 1291,\n 1246,\n 1034,\n 927,\n 919,\n 1081,\n 834,\n 661,\n 496,\n 522,\n 494,\n 576,\n 648,\n 703,\n 296,\n 213,\n 219,\n 188,\n 222,\n 189,\n 217,\n 220,\n 220,\n 211,\n 229,\n 245,\n 217,\n 190,\n 237,\n 229,\n 232,\n 222,\n 231,\n 525,\n 2773,\n 2478,\n 1631,\n 1307,\n 1261,\n 1470,\n 1666,\n 1561,\n 2219,\n 2881,\n 2501,\n 2572,\n 2172,\n 2151,\n 2058,\n 2114,\n 1941,\n 1708,\n 1521,\n 1574,\n 1577,\n 1419,\n 1509,\n 1486,\n 1527,\n 1655,\n 1894,\n 1814,\n 1668,\n 2073,\n 1964,\n 1550,\n 3222,\n 3869,\n 3214,\n 2965,\n 2799,\n 3208,\n 3214,\n 3119,\n 3197,\n 3800,\n 4353,\n 4867,\n 3127,\n 2006,\n 1836,\n 1810,\n 1578,\n 1564,\n 1539,\n 1429,\n 1414,\n 1332,\n 1171,\n 1239,\n 1232,\n 1296,\n 1303,\n 1362,\n 1570,\n 1568,\n 1496,\n 1380,\n 1399,\n 1399,\n 1344,\n 1379,\n 1161,\n 1068,\n 1046,\n 943,\n 880,\n 879,\n 946,\n 1062,\n 1126,\n 1194,\n 1175,\n 1077,\n 1226,\n 1289,\n 1395,\n 1426,\n 1454,\n 1418,\n 1214,\n 1212,\n 1222,\n 1077,\n 916,\n 689,\n 691,\n 588,\n 517,\n 535,\n 558,\n 658,\n 631,\n 474,\n 252,\n 215,\n 227,\n 238,\n 237,\n 215,\n 
247,\n 218,\n 231,\n 235,\n 202,\n 200,\n 214,\n 229,\n 238,\n 230,\n 232,\n 298,\n 2318,\n 2921,\n 1775,\n 1287,\n 1122,\n 1411,\n 1517,\n 1504,\n 2138,\n 2763,\n 2477,\n 2633,\n 2334,\n 2086,\n 2020,\n 2084,\n 1905,\n 1659,\n 1513,\n 1465,\n 1564,\n 1627,\n 1666,\n 1833,\n 2335,\n 2435,\n 1912,\n 1946,\n 2148,\n 1883,\n 1655,\n 1566,\n 2084,\n 3995,\n 2867,\n 2576,\n 2435,\n 2404,\n 2587,\n 3297,\n 3697,\n 3542,\n 3826,\n 4447,\n 2662,\n 2079,\n 1774,\n 1653,\n 1520,\n 1497,\n 1468,\n 1549,\n 1637,\n 1632,\n 1326,\n 1319,\n 1206,\n 1097,\n 1337,\n 1514,\n 1506,\n 1634,\n 1443,\n 1433,\n 1368,\n 1473,\n 1360,\n 1328,\n 1285,\n 1057,\n 1023,\n 987,\n 923,\n 939,\n 849,\n 952,\n 950,\n 1092,\n 1122,\n 1129,\n 1225,\n 1216,\n 1240,\n 1253,\n 1501,\n 1512,\n 1358,\n 1317,\n 1234,\n 1155,\n 1049,\n 926,\n 716,\n 670,\n 556,\n 481,\n 515,\n 568,\n 574,\n 654,\n 468,\n 245,\n 201,\n 189,\n 228,\n 201,\n 197,\n 222,\n 216,\n 189,\n 203,\n 203,\n 218,\n 226,\n 207,\n 228,\n 236,\n 259,\n 498,\n 2716,\n 2520,\n 1619,\n 1174,\n 1196,\n 1467,\n 1391,\n 1599,\n 2450,\n 2605,\n 2540,\n 2466,\n 2383,\n 2195,\n 2034,\n 1897,\n 1892,\n 1589,\n 1474,\n 1851,\n 1936,\n 1886,\n 2110,\n 2682,\n 2349,\n 2288,\n 2187,\n 2021,\n 2025,\n 1940,\n 1605,\n 1532,\n 2368,\n 4300,\n 3305,\n 2584,\n 2400,\n 2027,\n 2242,\n 3716,\n 3671,\n 3204,\n 3288,\n 3584,\n 2291,\n 1894,\n 1798,\n 1598,\n 1488,\n 1435,\n 1456,\n 1499,\n 1612,\n 1773,\n 1637,\n 1452,\n 1333,\n 1240,\n 1254,\n 1223,\n 1451,\n 1507,\n 1397,\n 1305,\n 1243,\n 1344,\n 1413,\n 1364,\n 1178,\n 961,\n 968,\n 1058,\n 849,\n 832,\n 826,\n 858,\n 874,\n 926,\n 970,\n 1080,\n 1146,\n 1139,\n 1153,\n 1237,\n 1205,\n 1117,\n 1093,\n 1034,\n 1119,\n 1124,\n 1040,\n 977,\n 699,\n 596,\n 618,\n 497,\n 557,\n 492,\n 560,\n 573,\n 610,\n 286,\n 234,\n 208,\n 245,\n 223,\n 225,\n 194,\n 203,\n 211,\n 227,\n 214,\n 222,\n 185,\n 194,\n 231,\n 227,\n 237,\n 325,\n 1842,\n 2807,\n 1890,\n 1252,\n 1180,\n 1358,\n 1302,\n 1566,\n 2116,\n 2657,\n 
2470,\n 2543,\n 2285,\n 2164,\n 2134,\n 2032,\n 1933,\n 1885,\n 1957,\n 2335,\n 2469,\n 2464,\n 2635,\n 2451,\n 2500,\n 2224,\n 2223,\n 2405,\n 2114,\n 2060,\n 2094,\n 1784,\n 1988,\n 3322,\n 3639,\n 2889,\n 2847,\n 3248,\n 2160,\n 2777,\n 2987,\n 3060,\n 3374,\n 3083,\n 2170,\n 1835,\n 1715,\n 1613,\n 1566,\n 1551,\n 1422,\n 1450,\n 1598,\n 1800,\n 1691,\n 1791,\n 1625,\n 1353,\n 1311,\n 1261,\n 1417,\n 1524,\n 1395,\n 1306,\n 1308,\n 1388,\n 1463,\n 1358,\n 1326,\n 1080,\n 996,\n 929,\n 1016,\n 1000,\n 949,\n 900,\n 882,\n 924,\n 832,\n 963,\n 1033,\n 1041,\n 999,\n 1155,\n 1089,\n 934,\n 1013,\n 887,\n 917,\n 1051,\n 1069,\n 1014,\n 920,\n 641,\n 553,\n 588,\n 541,\n 525,\n 506,\n 531,\n 612,\n 499,\n 239,\n 200,\n 197,\n 213,\n 185,\n 178,\n 209,\n 216,\n 189,\n 213,\n 233,\n 211,\n 205,\n 211,\n 206,\n 220,\n 490,\n 2458,\n 2599,\n 1767,\n 1238,\n 1245,\n 1306,\n 1238,\n 1550,\n 2146,\n 2559,\n 2693,\n 2426,\n 2148,\n 2134,\n 2000,\n 2285,\n 2131,\n 2095,\n 2438,\n 2758,\n 2331,\n 2363,\n 2605,\n 2759,\n 2457,\n 2232,\n 2418,\n 2586,\n 2662,\n 2450,\n 2487,\n 2357,\n 2352,\n 3383,\n 3216,\n 2889,\n 2755,\n 2564,\n 1930,\n 2506,\n 2940,\n 2986,\n 3291,\n 3566,\n 2229,\n 1635,\n 1610,\n 1529,\n 1577,\n 1649,\n 1549,\n 1437,\n 1488,\n 1681,\n 1553,\n 1731,\n 1826,\n 1662,\n 1446,\n 1330,\n 1373,\n 1449,\n 1361,\n 1272,\n 1279,\n 1331,\n 1368,\n 1408,\n 1402,\n 1258,\n 1103,\n 963,\n 941,\n 1040,\n 1173,\n 996,\n 922,\n 866,\n 901,\n 908,\n 939,\n 939,\n 1060,\n 1071,\n 922,\n 743,\n 820,\n 895,\n 855,\n 886,\n 948,\n 982,\n 907,\n 767,\n 530,\n 513,\n 575,\n 541,\n 527,\n 561,\n 586,\n 705,\n 302,\n 221,\n 227,\n 225,\n 213,\n 225,\n 188,\n 194,\n 216,\n 223,\n 210,\n 214,\n 209,\n 217,\n 197,\n 257,\n 858,\n 2976,\n 2312,\n 1573,\n 1228,\n 1287,\n 1268,\n 1395,\n 1666,\n 2429,\n 2676,\n 2419,\n 2370,\n 2052,\n 2028,\n 2064,\n 2175,\n 1998,\n 2074,\n 2579,\n 2165,\n 2528,\n 2714,\n 2869,\n 2320,\n 2255,\n 2569,\n 2585,\n 2502,\n 2769,\n 2510,\n 2809,\n 2522,\n 
2304,\n 3437,\n 2869,\n 2702,\n 2955,\n 2443,\n 1810,\n 2419,\n 3229,\n 3287,\n 3305,\n 3710,\n 2218,\n 1568,\n 1612,\n 1446,\n 1450,\n 1390,\n 1520,\n 1486,\n 1519,\n 1490,\n 1600,\n 1671,\n 1773,\n 1731,\n 1475,\n 1486,\n 1441,\n 1401,\n 1350,\n 1299,\n 1247,\n 1242,\n 1271,\n 1445,\n 1262,\n 1216,\n 1046,\n 815,\n 871,\n 859,\n 854,\n 932,\n 919,\n 952,\n 949,\n 920,\n 866,\n 912,\n 832,\n 725,\n 744,\n 787,\n 820,\n 790,\n 752,\n 847,\n 866,\n 930,\n 851,\n 771,\n 705,\n 512,\n 582,\n 562,\n 538,\n 548,\n 621,\n 707,\n 309,\n 221,\n 204,\n 209,\n 195,\n 199,\n 222,\n 196,\n 206,\n 220,\n 203,\n 223,\n 228,\n 209,\n 237,\n 331,\n 1885,\n 2880,\n 1874,\n 1263,\n 1201,\n 1218,\n 1312,\n 1601,\n 2046,\n 2756,\n 2484,\n 2373,\n 2067,\n 1915,\n 1839,\n 2180,\n 1914,\n 2140,\n 2669,\n 2450,\n 2344,\n 2578,\n 2764,\n 2348,\n 2169,\n 2276,\n 2303,\n 2516,\n 2771,\n 2934,\n 2996,\n 2907,\n 2850,\n 2580,\n 3641,\n 3264,\n 3210,\n 3278,\n 2662,\n 2100,\n 2297,\n 3030,\n 3065,\n 3263,\n 3622,\n 2071,\n 1845,\n 1541,\n 1404,\n 1449,\n 1423,\n 1508,\n 1519,\n 1563,\n 1604,\n 1650,\n 1700,\n 1615,\n 1586,\n 1581,\n 1667,\n 1618,\n 1351,\n 1268,\n 1211,\n 1199,\n 1207,\n 1267,\n 1340,\n 1224,\n 1105,\n 912,\n 856,\n 738,\n 694,\n 686,\n 726,\n 808,\n 736,\n 656,\n 735,\n 654,\n 669,\n 570,\n 646,\n 707,\n 790,\n 751,\n 829,\n 798,\n 751,\n 775,\n 810,\n 873,\n 774,\n 649,\n 510,\n 503,\n 542,\n 514,\n 521,\n 651,\n 738,\n 415,\n 192,\n 206,\n 207,\n 213,\n 212,\n 206,\n 223,\n 213,\n 200,\n 211,\n 226,\n 245,\n 213,\n 252,\n 291,\n 1745,\n 2944,\n 1984,\n 1344,\n 1200,\n 1192,\n 1317,\n 1496,\n 1977,\n 2742,\n 2489,\n 2259,\n 1909,\n 1821,\n 1776,\n 2175,\n 2136,\n 2256,\n 2767,\n 2459,\n 2456,\n 2495,\n 2272,\n 2181,\n 2351,\n 2352,\n 2364,\n 2761,\n 2876,\n 2724,\n 2614,\n 2516,\n 2718,\n 2314,\n 3324,\n 2739,\n 2735,\n 3061,\n 2591,\n 1930,\n 1705,\n 2319,\n 2879,\n 2894,\n 3447,\n 2798,\n 1753,\n 1648,\n 1393,\n 1353,\n 1390,\n 1428,\n 1450,\n 1493,\n 1554,\n 1552,\n 
1674,\n 1597,\n 1541,\n 1516,\n 1579,\n 1646,\n 1605,\n 1400,\n 1295,\n 1138,\n 1148,\n 1291,\n 1257,\n 1276,\n 1141,\n 977,\n 874,\n 777,\n 736,\n 649,\n 698,\n 640,\n 742,\n 595,\n 578,\n 574,\n 709,\n 664,\n 668,\n 654,\n 655,\n 687,\n 676,\n 715,\n 739,\n 725,\n 747,\n 819,\n 721,\n 680,\n 574,\n 544,\n 542,\n 488,\n 509,\n 575,\n 656,\n 732,\n 321,\n 242,\n 220,\n 188,\n 201,\n 253,\n 206,\n 228,\n 205,\n 214,\n 188,\n 221,\n 213,\n 225,\n 403,\n 2197,\n 2826,\n 1799,\n 1278,\n 1234,\n 1115,\n 1350,\n 1601,\n 2249,\n 2616,\n 2415,\n 2152,\n 1767,\n 1599,\n 1933,\n 2468,\n 2171,\n 2274,\n 2950,\n 2790,\n 2474,\n 2294,\n 2237,\n 2253,\n 2355,\n 2370,\n 2740,\n 2795,\n 2467,\n 2434,\n 2523,\n 2431,\n 2490,\n 2032,\n 3200,\n 2653,\n 2432,\n 2349,\n 2161,\n 1689,\n 1614,\n 2091,\n 2688,\n 2594,\n 3259,\n 2841,\n 1658,\n 1492,\n 1436,\n 1355,\n 1183,\n 1302,\n 1334,\n 1395,\n 1507,\n 1535,\n 1674,\n 1612,\n 1572,\n 1579,\n 1600,\n 1634,\n 1580,\n 1505,\n 1422,\n 1254,\n 1219,\n 1244,\n 1337,\n 1235,\n 1187,\n 879,\n 803,\n 800,\n 719,\n 733,\n 629,\n 657,\n 712,\n 701,\n 622,\n 604,\n 605,\n 527,\n 587,\n 661,\n 622,\n 594,\n 606,\n 708,\n 693,\n 683,\n 687,\n 675,\n 698,\n 612,\n 632,\n 479,\n 557,\n 558,\n 528,\n 610,\n 675,\n 784,\n 304,\n 209,\n 196,\n 183,\n 227,\n 191,\n 206,\n 208,\n 229,\n 212,\n 243,\n 203,\n 236,\n 261,\n 1513,\n 2968,\n 2073,\n 1458,\n 1237,\n 1184,\n 1373,\n 1663,\n 2008,\n 2523,\n 2350,\n 2251,\n 1800,\n 1617,\n 1736,\n 2916,\n 2816,\n 1988,\n 2760,\n 3127,\n 2333,\n 2248,\n 2281,\n 2298,\n 2393,\n 2531,\n 2781,\n 2685,\n 2480,\n 2588,\n 2518,\n 2526,\n 2677,\n 2248,\n 3214,\n 2997,\n 2657,\n 2531,\n 2700,\n 1882,\n 1531,\n 1593,\n 2342,\n 2498,\n 2740,\n 3602,\n 2234,\n 1557,\n 1387,\n 1343,\n 1175,\n 1167,\n 1210,\n 1300,\n 1329,\n 1420,\n 1430,\n 1549,\n 1504,\n 1571,\n 1740,\n 1897,\n 1767,\n 1626,\n 1484,\n 1279,\n 1275,\n 1276,\n 1300,\n 1306,\n 1164,\n 1011,\n 797,\n 767,\n 923,\n 790,\n 750,\n 750,\n 616,\n 591,\n 536,\n 607,\n 
553,\n 486,\n 520,\n 579,\n 538,\n 563,\n 583,\n 593,\n 591,\n 727,\n 604,\n 644,\n 637,\n 562,\n 571,\n 524,\n 505,\n 569,\n 569,\n 560,\n 654,\n 750,\n 663,\n 282,\n 180,\n 203,\n 224,\n 211,\n 192,\n 221,\n 194,\n 206,\n 218,\n 191,\n 222,\n 251,\n 403,\n 2165,\n 2746,\n 1762,\n 1295,\n 1170,\n 1088,\n 1401,\n 1826,\n 2216,\n 2320,\n 2338,\n 2060,\n 1935,\n 1776,\n 1971,\n 3369,\n 2278,\n 2057,\n 2480,\n 2160,\n 2202,\n 2170,\n 2168,\n 2500,\n 2496,\n 2665,\n 2818,\n 2414,\n 2364,\n 2230,\n 2315,\n 2588,\n 2536,\n 2457,\n 3243,\n 2789,\n 2194,\n 2935,\n 2695,\n 1607,\n 1338,\n 1351,\n 2227,\n 2486,\n 2599,\n 3328,\n 2435,\n 1495,\n 1269,\n 1324,\n 1068,\n 1112,\n 1109,\n 1141,\n 1282,\n 1420,\n 1369,\n 1385,\n 1440,\n 1625,\n 1632,\n 1661,\n 1628,\n 1551,\n 1473,\n 1354,\n 1344,\n 1318,\n 1399,\n 1367,\n 1215,\n 957,\n 739,\n 783,\n 883,\n 760,\n 671,\n 556,\n 602,\n 584,\n 556,\n 596,\n 587,\n 459,\n 452,\n 490,\n 594,\n 533,\n 542,\n 574,\n 567,\n 545,\n 539,\n 539,\n 590,\n 551,\n 608,\n 527,\n 524,\n 541,\n 548,\n 595,\n 647,\n 777,\n 675,\n 276,\n 205,\n 203,\n 202,\n 213,\n 213,\n 175,\n 221,\n 217,\n 225,\n 211,\n 203,\n 210,\n 253,\n 983,\n 2921,\n 2191,\n 1431,\n 1249,\n 1058,\n 1292,\n 1667,\n 2057,\n 2262,\n 2356,\n 2209,\n 1983,\n 1826,\n 1969,\n 2282,\n 2177,\n 1896,\n 2118,\n 1816,\n 1964,\n 2288,\n 2194,\n 2419,\n 2495,\n 2829,\n 3195,\n 2761,\n 2129,\n 1902,\n 2098,\n 2287,\n 2132,\n 2581,\n 2901,\n 3032,\n 2407,\n 2948,\n 3594,\n 2258,\n 1499,\n 1853,\n 1769,\n 2432,\n 2623,\n 2700,\n 3312,\n 2378,\n 1588,\n 1232,\n 1036,\n 1153,\n 1173,\n 1036,\n 951,\n 1217,\n 1287,\n 1215,\n 1361,\n 1398,\n 1488,\n 1535,\n 1468,\n 1589,\n 1530,\n 1361,\n 1299,\n 1293,\n 1340,\n 1311,\n 1364,\n 1121,\n 887,\n 882,\n 898,\n 913,\n 677,\n 641,\n 637,\n 693,\n 572,\n 541,\n 570,\n 556,\n 528,\n 516,\n 524,\n 557,\n 611,\n 562,\n 610,\n 555,\n 501,\n 473,\n 472,\n 601,\n 576,\n 617,\n 593,\n 565,\n 461,\n 528,\n 547,\n 649,\n 764,\n 683,\n 247,\n 192,\n 206,\n 
212,\n 224,\n 217,\n 232,\n 218,\n 200,\n 225,\n 204,\n 209,\n 500,\n 2416,\n 2571,\n 1700,\n 1293,\n 1132,\n 1108,\n 1479,\n 1904,\n 2058,\n 2284,\n 2306,\n 2128,\n 1824,\n 1977,\n 2217,\n 1955,\n 1856,\n 1913,\n 1763,\n 1732,\n 1856,\n 2082,\n 2467,\n 2185,\n 2382,\n 2449,\n 2684,\n 2060,\n 1846,\n 1761,\n 1962,\n 2065,\n 2724,\n 2538,\n 2863,\n 2489,\n 3310,\n 3491,\n 2760,\n 1813,\n 1790,\n 1821,\n 2557,\n 2767,\n 3026,\n 3349,\n 3583,\n 2082,\n 1538,\n 1119,\n 1150,\n 1298,\n 1059,\n 1107,\n 1104,\n 1227,\n 1295,\n 1293,\n 1322,\n 1390,\n 1402,\n 1317,\n 1545,\n 1517,\n 1411,\n 1275,\n 1432,\n 1288,\n 1333,\n 1357,\n 1274,\n 1024,\n 931,\n 936,\n 823,\n 769,\n 657,\n 619,\n 618,\n 551,\n 587,\n 515,\n 580,\n 539,\n 516,\n 553,\n 555,\n 526,\n 538,\n 596,\n 552,\n 540,\n 558,\n 474,\n 541,\n 537,\n 514,\n 616,\n 584,\n 481,\n 472,\n 517,\n 532,\n 665,\n 761,\n 395,\n 220,\n 203,\n 224,\n 214,\n 209,\n 199,\n 220,\n 200,\n 197,\n 189,\n 215,\n 245,\n 259,\n 1495,\n 2990,\n 2081,\n 1411,\n 1105,\n 1072,\n 1241,\n 1775,\n 2045,\n 2222,\n 2239,\n 2182,\n 1972,\n 1942,\n 2004,\n 1961,\n 2005,\n 2199,\n 1857,\n 1711,\n 1619,\n 1810,\n 2141,\n 2065,\n 2027,\n 2436,\n 2245,\n 2115,\n 1896,\n 1944,\n 1954,\n 1888,\n 2588,\n 2299,\n 3383,\n 3251,\n 3167,\n 3224,\n 3647,\n 2305,\n 1896,\n 1705,\n 1935,\n 2867,\n 3177,\n 3233,\n 3792,\n 3620,\n 2111,\n 1464,\n 1307,\n 1199,\n 1341,\n 1327,\n 1066,\n 1124,\n 1215,\n 1286,\n 1256,\n 1339,\n 1384,\n 1392,\n 1273,\n 1424,\n 1497,\n 1276,\n 1253,\n 1379,\n 1367,\n 1293,\n 1333,\n 1077,\n 914,\n 912,\n 902,\n 690,\n 696,\n 667,\n 575,\n 615,\n 758,\n 590,\n 504,\n 556,\n 571,\n 584,\n 513,\n 526,\n 480,\n 481,\n 571,\n 500,\n 566,\n 628,\n 554,\n 521,\n 553,\n 533,\n 605,\n 584,\n 487,\n 412,\n 494,\n 527,\n 519,\n 556,\n 283,\n 213,\n 203,\n 218,\n 196,\n 208,\n 211,\n 216,\n 226,\n 208,\n 229,\n 241,\n 630,\n 2598,\n 2420,\n 1639,\n 1200,\n 1082,\n 1141,\n 1440,\n 2065,\n 2294,\n 2176,\n 2185,\n 2011,\n 1952,\n 2350,\n 2055,\n 
1886,\n 2296,\n 2029,\n 1625,\n 1742,\n 1789,\n 2130,\n 2046,\n 1892,\n 1999,\n 2235,\n 2084,\n 1988,\n 2008,\n 1784,\n 1823,\n 2265,\n 2264,\n 3242,\n 3435,\n 3152,\n 2792,\n 3356,\n 3249,\n 2137,\n 1884,\n 1968,\n 2856,\n 3579,\n 3354,\n 3135,\n 3442,\n 3699,\n 1742,\n 1446,\n 1557,\n 1548,\n 1293,\n 1256,\n 1080,\n 1058,\n 1208,\n 1291,\n 1248,\n 1437,\n 1341,\n 1211,\n 1316,\n 1314,\n 1238,\n 1172,\n 1248,\n 1254,\n 1300,\n 1281,\n 1093,\n 874,\n 965,\n 909,\n 815,\n 679,\n 686,\n 598,\n 558,\n 630,\n 624,\n 554,\n 549,\n 554,\n 560,\n 507,\n 508,\n 527,\n 495,\n 546,\n 501,\n 539,\n 531,\n 509,\n 508,\n 539,\n 557,\n 551,\n 516,\n 498,\n 438,\n 506,\n 612,\n 544,\n 546,\n 638,\n 300,\n 242,\n 223,\n 202,\n 211,\n 238,\n 207,\n 200,\n 210,\n 205,\n 194,\n 229,\n 321,\n 1886,\n 2799,\n 1800,\n 1362,\n 1146,\n 1067,\n 1310,\n 1751,\n 2209,\n 2383,\n 2297,\n 2270,\n 2004,\n 2249,\n 2530,\n 1970,\n 2204,\n 2392,\n 1695,\n 1568,\n 1751,\n 1963,\n 2140,\n 1985,\n 1971,\n 1924,\n 2046,\n 1930,\n 1931,\n 1910,\n 2080,\n 2285,\n 2250,\n 3005,\n 3888,\n 3028,\n 2762,\n 3187,\n 3606,\n 1924,\n 1847,\n 1965,\n 2411,\n 2796,\n 3591,\n 3556,\n 3386,\n 3443,\n 2843,\n 1500,\n 1290,\n 1343,\n 1338,\n 1354,\n 1245,\n 1198,\n 1170,\n 1157,\n 1293,\n 1353,\n 1388,\n 1285,\n 1294,\n 1409,\n 1224,\n 1142,\n 1194,\n 1133,\n 1192,\n 1303,\n 1134,\n 946,\n 871,\n 959,\n 845,\n 802,\n 726,\n 616,\n 554,\n 611,\n 590,\n 514,\n 475,\n 494,\n 548,\n 551,\n 520,\n 529,\n 561,\n 481,\n 538,\n 539,\n 540,\n 490,\n 474,\n 481,\n 631,\n 649,\n 558,\n 537,\n 509,\n 489,\n 589,\n 520,\n 473,\n 775,\n 456,\n 243,\n 194,\n 215,\n 228,\n 209,\n 176,\n 216,\n 229,\n 218,\n 228,\n 267,\n 1164,\n 2895,\n 2166,\n 1475,\n 1105,\n 1104,\n 1166,\n 1442,\n 1969,\n 2427,\n 2307,\n 2113,\n 2121,\n 2200,\n 2773,\n 2462,\n 2453,\n 2557,\n 1922,\n 1459,\n 1526,\n 1802,\n 2106,\n 2097,\n 1953,\n 1978,\n 1823,\n 2012,\n 1977,\n 1678,\n 2045,\n 2471,\n 2118,\n 2898,\n 4099,\n 3327,\n 3030,\n 2923,\n 4017,\n 
2083,\n 1747,\n 2037,\n 2217,\n 2054,\n 3150,\n 3398,\n 2960,\n 3427,\n 2500,\n 1580,\n 1275,\n 1238,\n 1176,\n 1207,\n 1306,\n 1252,\n 1097,\n 1229,\n 1177,\n 1279,\n 1276,\n 1296,\n 1263,\n 1336,\n 1290,\n 1201,\n 1237,\n 1117,\n 1146,\n 1135,\n 1246,\n 1177,\n 1054,\n 1002,\n 916,\n 899,\n 797,\n 623,\n 541,\n 571,\n 615,\n 527,\n 552,\n 511,\n 495,\n 530,\n 489,\n 496,\n 473,\n 498,\n 495,\n 622,\n 572,\n 526,\n 481,\n 468,\n 483,\n 520,\n 497,\n 514,\n 479,\n 507,\n 542,\n 573,\n 542,\n 525,\n 754,\n 324,\n 192,\n 205,\n 213,\n 191,\n 211,\n 229,\n 208,\n 227,\n 217,\n 220,\n 226,\n 420,\n 2081,\n 2667,\n 1847,\n 1289,\n 1037,\n 1119,\n 1351,\n 1647,\n 2265,\n 2523,\n 2263,\n 2341,\n 2113,\n 2740,\n 2474,\n 2175,\n 2504,\n 2024,\n 1432,\n 1544,\n 1550,\n 2024,\n 2118,\n 1896,\n 1854,\n 1753,\n 1946,\n 1938,\n 1737,\n 2141,\n 2376,\n 2305,\n 2633,\n 3717,\n 3764,\n 3003,\n 2782,\n 3547,\n 3076,\n 1778,\n 2060,\n 2019,\n 1902,\n 2146,\n 2981,\n 2920,\n 2739,\n 3453,\n 2109,\n 1173,\n 1143,\n 1175,\n 1103,\n 1250,\n 1239,\n 1275,\n 1035,\n 1081,\n 1035,\n 1225,\n 1385,\n 1278,\n 1322,\n 1366,\n 1270,\n 1231,\n 1114,\n 1088,\n 1134,\n 1189,\n 1209,\n 1204,\n 1100,\n 944,\n 865,\n 682,\n 675,\n 593,\n 557,\n 604,\n 536,\n 509,\n 563,\n 563,\n 467,\n 455,\n 509,\n 543,\n 538,\n 531,\n 523,\n 584,\n 548,\n 593,\n 478,\n 437,\n 489,\n 552,\n 533,\n 560,\n 498,\n 468,\n 456,\n 543,\n 540,\n 844,\n 725,\n 252,\n 215,\n 227,\n 189,\n 210,\n 210,\n 227,\n 206,\n 227,\n 252,\n 270,\n 1387,\n 2974,\n 2163,\n 1495,\n 1129,\n 1054,\n 1095,\n 1387,\n 1988,\n 2342,\n 2430,\n 2315,\n 2095,\n 2541,\n 2555,\n 2189,\n 2160,\n 1946,\n 1391,\n 1461,\n 1526,\n 1684,\n 1898,\n 1893,\n 1757,\n 1870,\n 1852,\n 1964,\n 1833,\n 2295,\n 2227,\n 2583,\n 2298,\n 3619,\n 4382,\n 3578,\n 3004,\n 3184,\n 3673,\n 1998,\n 1955,\n 1942,\n 1943,\n 1944,\n 2579,\n 2853,\n 2600,\n 2719,\n 3047,\n 1342,\n 1314,\n 1063,\n 1231,\n 1166,\n 1187,\n 1262,\n 1088,\n 1098,\n 965,\n 1144,\n 1205,\n 1349,\n 
1158,\n 1290,\n 1328,\n 1162,\n 1146,\n 1094,\n 1177,\n 1106,\n 1326,\n 1281,\n 1183,\n 879,\n 879,\n 685,\n 623,\n 632,\n 614,\n 587,\n 550,\n 542,\n 575,\n 556,\n 598,\n 538,\n 479,\n 519,\n 574,\n 681,\n 556,\n 508,\n 527,\n 503,\n 546,\n 481,\n 532,\n 501,\n 519,\n 541,\n 519,\n 506,\n 471,\n 464,\n 582,\n 723,\n 904,\n 364,\n 214,\n 209,\n 194,\n 213,\n 214,\n 209,\n 193,\n 203,\n 201,\n 237,\n 283,\n 1484,\n 3006,\n 2179,\n 1510,\n 1152,\n 1069,\n 1127,\n 1453,\n 1894,\n 2339,\n 2401,\n 2270,\n 2226,\n 2213,\n 2102,\n 2051,\n 2360,\n 1863,\n 1484,\n 1663,\n 1414,\n 1454,\n 1775,\n 1721,\n 1840,\n 1720,\n 1815,\n 1997,\n 2098,\n 2031,\n 2183,\n 2305,\n 2345,\n 3436,\n 4266,\n 3537,\n 3141,\n 3365,\n 3790,\n 1855,\n 1594,\n 1540,\n 1805,\n 1717,\n 2070,\n 2760,\n 2613,\n 2787,\n 3316,\n 1743,\n 1411,\n 1209,\n 1129,\n 1245,\n 1219,\n 1203,\n 1215,\n 1220,\n 917,\n 955,\n 1259,\n 1257,\n 1209,\n 1226,\n 1334,\n 1217,\n 1174,\n 1169,\n 1178,\n 1119,\n 1399,\n 1502,\n 1047,\n 892,\n 881,\n 668,\n 640,\n 635,\n 599,\n 551,\n 622,\n 590,\n 580,\n 504,\n 575,\n 439,\n 469,\n 458,\n 526,\n 549,\n 501,\n 519,\n 515,\n 570,\n 494,\n 478,\n 510,\n 459,\n 584,\n 575,\n 525,\n 482,\n 509,\n 517,\n 613,\n 699,\n 923,\n 362,\n 207,\n 227,\n 227,\n 208,\n 197,\n 201,\n 192,\n 229,\n 216,\n 214,\n 256,\n 1307,\n 2842,\n 2179,\n 1549,\n 1243,\n 1121,\n 1175,\n 1492,\n 1922,\n 2435,\n 2393,\n 2258,\n 2266,\n 2350,\n 2288,\n 2004,\n 2291,\n 1993,\n 1358,\n 1390,\n 1445,\n 1324,\n 1646,\n 1680,\n 1808,\n 1969,\n 2048,\n 2382,\n 2281,\n 2089,\n 2262,\n 2164,\n 1858,\n 2445,\n 4331,\n 3550,\n 3069,\n 3567,\n 4115,\n 1925,\n 1569,\n 1673,\n 1622,\n 1445,\n 1662,\n 2717,\n 2815,\n 2941,\n 3518,\n 2436,\n 1482,\n 1257,\n 1162,\n 1199,\n 1215,\n 1138,\n 1229,\n 1245,\n 1027,\n 991,\n 1151,\n 1276,\n 1194,\n 1316,\n 1452,\n 1231,\n 1140,\n 1223,\n 1175,\n 1252,\n 1399,\n 1490,\n 1082,\n 894,\n 810,\n 676,\n 659,\n 581,\n 605,\n 591,\n 538,\n 560,\n 537,\n 519,\n 492,\n 481,\n 521,\n 
460,\n 495,\n 444,\n 499,\n 534,\n 525,\n 550,\n 505,\n 494,\n 532,\n 491,\n 552,\n 670,\n 600,\n 609,\n 496,\n 548,\n 576,\n 681,\n 925,\n 551,\n 241,\n 207,\n 232,\n 228,\n 210,\n 209,\n 217,\n 195,\n 228,\n 210,\n 233,\n 582,\n 2105,\n 2583,\n 1732,\n 1438,\n 1218,\n 1034,\n 1314,\n 1647,\n 2278,\n 2386,\n 2177,\n 2259,\n 2278,\n 2372,\n 2185,\n 2251,\n 2238,\n 1696,\n 1226,\n 1208,\n 1298,\n 1568,\n 1596,\n 1722,\n 2048,\n 2216,\n 2368,\n 2405,\n 2163,\n 2115,\n 2205,\n 1919,\n 2092,\n 3465,\n 3833,\n 3571,\n 3623,\n 3918,\n 2049,\n 1533,\n 1529,\n 1406,\n 1265,\n 1435,\n 2138,\n 2999,\n 2953,\n 2948,\n 3425,\n 1997,\n 1600,\n 1271,\n 1140,\n 1219,\n 1166,\n 1230,\n 1319,\n 1349,\n 1370,\n 1180,\n 1303,\n 1356,\n 1294,\n 1519,\n 1357,\n 1235,\n 1188,\n 1161,\n 1249,\n 1339,\n 1662,\n 1208,\n 810,\n 811,\n 719,\n 632,\n 579,\n 641,\n 593,\n 532,\n 540,\n 532,\n 545,\n 540,\n 459,\n 594,\n 465,\n 486,\n 438,\n 510,\n 489,\n 513,\n 641,\n 549,\n 480,\n 539,\n 475,\n 498,\n 551,\n 549,\n 590,\n 515,\n 475,\n 531,\n 632,\n 790,\n 895,\n 335,\n 194,\n 208,\n 204,\n 243,\n 234,\n 226,\n 214,\n 217,\n 237,\n 248,\n 1147,\n 2646,\n 2252,\n 1664,\n 1384,\n 1215,\n 1217,\n 1456,\n 1955,\n 2411,\n 2189,\n 2222,\n 2315,\n 2269,\n 2390,\n 2121,\n 2324,\n 1926,\n 1547,\n 1353,\n 1150,\n 1471,\n 1757,\n 1698,\n 2134,\n 2216,\n 2096,\n 2462,\n 2326,\n 2187,\n 2213,\n 1931,\n 2246,\n 2606,\n 3810,\n 3543,\n 3495,\n 3645,\n 3150,\n 1706,\n 1632,\n 1440,\n 1252,\n 1304,\n 1686,\n 2605,\n 2987,\n 2769,\n 2850,\n 3279,\n 2067,\n 1457,\n 1237,\n 1088,\n 1204,\n 1303,\n 1320,\n 1344,\n 1487,\n 1367,\n 1389,\n 1474,\n 1407,\n 1382,\n 1304,\n 1336,\n 1190,\n 1174,\n 1179,\n 1275,\n 1556,\n 1446,\n 985,\n 818,\n 680,\n 615,\n 582,\n 580,\n 601,\n 544,\n 556,\n 528,\n 536,\n 514,\n 511,\n 511,\n 467,\n 519,\n 531,\n 474,\n 427,\n 494,\n 477,\n 522,\n 465,\n 508,\n 489,\n 500,\n 463,\n 486,\n 524,\n 572,\n 556,\n 573,\n 563,\n 678,\n 839,\n 737,\n 254,\n 230,\n 162,\n 206,\n 192,\n 214,\n 
205,\n 241,\n 221,\n 217,\n 321,\n 1727,\n 2623,\n 2106,\n 1572,\n 1351,\n 1218,\n 1313,\n 1575,\n 2113,\n 2182,\n 2152,\n 2243,\n 2151,\n 2387,\n 2159,\n 2340,\n 2074,\n 1759,\n 1560,\n 1384,\n 1084,\n 1355,\n 1719,\n 1790,\n 2063,\n 2176,\n 2249,\n 2371,\n 2280,\n 2021,\n 1987,\n 2008,\n 2439,\n 2690,\n 3886,\n 3230,\n 3000,\n 4050,\n 2353,\n 1586,\n 1481,\n 1066,\n 1192,\n 1512,\n 1679,\n 3048,\n 2788,\n 2709,\n 2760,\n 3082,\n 2243,\n 1479,\n 1272,\n 1208,\n 1102,\n 1171,\n 1303,\n 1264,\n 1376,\n 1406,\n 1513,\n 1420,\n 1341,\n 1182,\n 1181,\n 1179,\n 1172,\n 1203,\n 1198,\n 1430,\n 1568,\n 1222,\n 918,\n 848,\n 747,\n 632,\n 611,\n 570,\n 551,\n 579,\n 570,\n 564,\n 618,\n 612,\n 584,\n 517,\n 547,\n 521,\n 485,\n 422,\n 512,\n 417,\n 469,\n 421,\n 441,\n 535,\n 601,\n 479,\n 494,\n 528,\n 513,\n 624,\n 553,\n 591,\n 637,\n 727,\n 950,\n 485,\n 220,\n 218,\n 210,\n 218,\n 184,\n 196,\n 188,\n 198,\n 190,\n 206,\n 281,\n 1477,\n 2806,\n 2237,\n 1699,\n 1444,\n 1221,\n 1351,\n 1500,\n 2028,\n 2226,\n 2107,\n 2272,\n 2140,\n 2258,\n 2155,\n 2205,\n 2093,\n 1697,\n 1474,\n 1341,\n 1149,\n 1194,\n 1556,\n 1801,\n 1880,\n 2164,\n 2359,\n 2392,\n 2187,\n 1984,\n 1815,\n 2098,\n 2175,\n 2205,\n 3743,\n 3330,\n 3210,\n 3784,\n 2300,\n 1579,\n 1345,\n 1137,\n 1207,\n 1468,\n 1581,\n 2009,\n 2179,\n 2604,\n 2691,\n 2794,\n 2260,\n 1319,\n 1168,\n 1083,\n 1076,\n 1097,\n 1277,\n 1416,\n 1402,\n 1466,\n 1485,\n 1434,\n 1119,\n 1055,\n 1104,\n 1083,\n 1169,\n 1249,\n 1299,\n 1401,\n 1479,\n 1160,\n 867,\n 869,\n 771,\n 698,\n 668,\n 559,\n 613,\n 579,\n 511,\n 601,\n 611,\n 629,\n 579,\n 555,\n 491,\n 578,\n 501,\n 425,\n 444,\n 477,\n 526,\n 477,\n 471,\n 474,\n 526,\n 469,\n 503,\n 529,\n 539,\n 575,\n 588,\n 608,\n 652,\n 766,\n 992,\n 667,\n 211,\n 236,\n 205,\n 195,\n 195,\n 201,\n 222,\n 194,\n 229,\n 198,\n 270,\n 1334,\n 2710,\n 2398,\n 1776,\n 1366,\n 1384,\n 1450,\n 1498,\n 1994,\n 2214,\n 2066,\n 2188,\n 2147,\n 1987,\n 2127,\n 2251,\n 2004,\n 1678,\n 1512,\n 
1370,\n 1179,\n 1012,\n 1346,\n 1737,\n 1690,\n 2174,\n 2239,\n 2361,\n 2276,\n 1984,\n 1896,\n 2289,\n 2031,\n 2326,\n 4081,\n 3667,\n 3435,\n 3902,\n 2998,\n 1600,\n 1512,\n 1227,\n 1284,\n 1425,\n 1551,\n 1906,\n 1660,\n 2814,\n 2847,\n 2655,\n 2997,\n 1508,\n 1174,\n 1120,\n 960,\n 1152,\n 1211,\n 1292,\n 1411,\n 1528,\n 1368,\n 1323,\n 1116,\n 1055,\n 1103,\n 1137,\n 1139,\n 1192,\n 1280,\n 1367,\n 1539,\n 1178,\n 879,\n 784,\n 749,\n 678,\n 661,\n 655,\n 692,\n 562,\n 502,\n 535,\n 557,\n 571,\n 585,\n 580,\n 506,\n 463,\n 426,\n 439,\n 524,\n 600,\n 506,\n 497,\n 481,\n 470,\n 503,\n 514,\n 521,\n 514,\n 530,\n 503,\n 656,\n 586,\n 616,\n 653,\n 837,\n 1000,\n 329,\n 215,\n 251,\n 184,\n 191,\n 227,\n 214,\n 209,\n 201,\n 181,\n 244,\n 521,\n 2114,\n 2744,\n 2086,\n 1618,\n 1365,\n 1281,\n 1446,\n 1646,\n 2264,\n 2175,\n 2142,\n 2153,\n 2016,\n 2104,\n 2213,\n 2077,\n 1725,\n 1610,\n 1462,\n 1398,\n 1041,\n 1171,\n 1351,\n 1666,\n 1926,\n 2295,\n 2143,\n 2250,\n 2081,\n 2119,\n 2230,\n 2210,\n 2124,\n 3662,\n 4363,\n 3616,\n 3480,\n 3986,\n 1926,\n 1422,\n 1108,\n 1015,\n 1112,\n 1505,\n 1938,\n 1552,\n 1940,\n 2827,\n 2708,\n 3026,\n 2663,\n 1485,\n 1166,\n 1231,\n 1227,\n 1234,\n 1249,\n 1287,\n 1409,\n 1436,\n 1320,\n 1172,\n 1023,\n 1046,\n 1133,\n 1143,\n 1117,\n 1281,\n 1298,\n 1471,\n 1402,\n 1033,\n 813,\n 805,\n 683,\n 634,\n 627,\n 623,\n 583,\n 551,\n 554,\n 559,\n 562,\n 524,\n 508,\n 485,\n 451,\n 439,\n 457,\n 618,\n 822,\n 543,\n 527,\n 498,\n 499,\n 489,\n 464,\n 529,\n 511,\n 556,\n 540,\n 491,\n 582,\n 618,\n 601,\n 666,\n 1012,\n 555,\n 225,\n 178,\n 227,\n 207,\n 202,\n 234,\n 207,\n 224,\n 235,\n 268,\n 1315,\n 2586,\n 2192,\n 1748,\n 1449,\n 1370,\n 1330,\n 1480,\n 1859,\n 2415,\n 2107,\n 2257,\n 2198,\n 1857,\n 2141,\n 1999,\n 1871,\n 1665,\n 1550,\n 1458,\n 1345,\n 972,\n 1192,\n 1494,\n 1630,\n 1897,\n 2096,\n 2101,\n 2196,\n 2225,\n 2223,\n 2091,\n 2103,\n 2858,\n 4712,\n 3982,\n 3230,\n 4061,\n 2964,\n 1520,\n 1148,\n 1020,\n 
1107,\n 1674,\n 1694,\n 1397,\n 1526,\n 2441,\n 3035,\n 2804,\n 3265,\n 2119,\n 1272,\n 1227,\n 1226,\n 1347,\n 1296,\n 1282,\n 1276,\n 1436,\n 1308,\n 1145,\n 1043,\n 1010,\n 1242,\n 1290,\n 1171,\n 1214,\n 1378,\n 1320,\n 1500,\n 1213,\n 925,\n 808,\n 697,\n 670,\n 579,\n 566,\n 575,\n 477,\n 569,\n 598,\n 561,\n 495,\n 514,\n 445,\n 379,\n 458,\n 431,\n 444,\n 507,\n 521,\n 471,\n 500,\n 504,\n 448,\n 489,\n 445,\n 493,\n 552,\n 545,\n 516,\n 530,\n 560,\n 578,\n 639,\n 790,\n 1053,\n 360,\n 218,\n 221,\n 187,\n 205,\n 203,\n 198,\n 201,\n 252,\n 226,\n 279,\n 1501,\n 2721,\n 2226,\n 1718,\n 1558,\n 1332,\n 1346,\n 1432,\n 1814,\n 2178,\n 2196,\n 2096,\n 2008,\n 1804,\n 1894,\n 1952,\n 1863,\n 1690,\n 1478,\n 1455,\n 1413,\n 1077,\n 1138,\n 1355,\n 1614,\n 1854,\n 2146,\n 2151,\n 2406,\n 2218,\n 1930,\n 1843,\n 2155,\n 2841,\n 3947,\n 3062,\n 3092,\n 4007,\n 2947,\n 1604,\n 1298,\n 1052,\n 1400,\n 1798,\n 1540,\n 1569,\n 1611,\n 2267,\n 3227,\n 3010,\n 3393,\n 2663,\n 1471,\n 1293,\n 1311,\n 1290,\n 1394,\n 1349,\n 1320,\n 1372,\n 1340,\n 1075,\n 891,\n 1056,\n 1186,\n 1208,\n 1245,\n 1211,\n 1282,\n 1386,\n 1397,\n 1111,\n 742,\n 760,\n 668,\n 675,\n 705,\n 740,\n 633,\n 562,\n 531,\n 524,\n 483,\n 548,\n 466,\n 500,\n 453,\n 401,\n 425,\n 435,\n 485,\n 495,\n 449,\n 493,\n 479,\n 470,\n 497,\n 496,\n 506,\n 512,\n 534,\n 455,\n 507,\n 493,\n 529,\n 547,\n 716,\n 968,\n 355,\n 217,\n 207,\n 191,\n 194,\n 210,\n 211,\n 211,\n 183,\n 204,\n 224,\n 1293,\n 2699,\n 2263,\n 1721,\n 1489,\n 1349,\n 1403,\n 1532,\n 1702,\n 1979,\n 2025,\n 1970,\n 2042,\n 1849,\n 1935,\n 2047,\n 1838,\n 1628,\n 1471,\n 1530,\n 1550,\n 1378,\n 1185,\n 1376,\n 1593,\n 1713,\n 2032,\n 2260,\n 2402,\n 2007,\n 1730,\n 1814,\n 1781,\n 2194,\n 2988,\n 3164,\n 2945,\n 3492,\n 3787,\n 2199,\n 1593,\n 1373,\n 1611,\n 1822,\n 1503,\n 1557,\n 1570,\n 1785,\n 3096,\n 3121,\n 3295,\n 3461,\n 1820,\n 1472,\n 1487,\n 1431,\n 1319,\n 1294,\n 1394,\n 1290,\n 1167,\n 1104,\n 1015,\n 1293,\n 1318,\n 
1251,\n 1292,\n 1235,\n 1245,\n 1370,\n 1539,\n 1092,\n 748,\n 665,\n 721,\n 716,\n 818,\n 966,\n 967,\n 935,\n 837,\n 785,\n 613,\n 552,\n 509,\n 493,\n 489,\n 502,\n 470,\n 472,\n 511,\n 600,\n 560,\n 485,\n 467,\n 430,\n 486,\n 440,\n 478,\n 443,\n 434,\n 418,\n 461,\n 561,\n 506,\n 515,\n 687,\n 1004,\n 450,\n 207,\n 198,\n 201,\n 228,\n 206,\n 247,\n 223,\n 219,\n 212,\n 252,\n 924,\n 2686,\n 2496,\n 1866,\n 1541,\n 1391,\n 1386,\n 1521,\n 1679,\n 2080,\n 1960,\n 1895,\n 1971,\n 1799,\n 1971,\n 1917,\n 1804,\n 1638,\n 1365,\n 1475,\n 1446,\n 1442,\n 1407,\n 1502,\n 1557,\n 1709,\n 2100,\n 2313,\n 2218,\n 1918,\n 1715,\n 1492,\n 1630,\n 1995,\n 2640,\n 3475,\n 3083,\n 3630,\n 3191,\n 2110,\n 1608,\n 1476,\n 1590,\n 1692,\n 1593,\n 1580,\n 1525,\n 1617,\n 2814,\n 3239,\n 3271,\n 3268,\n 2013,\n 1379,\n 1583,\n 1278,\n 1245,\n 1282,\n 1338,\n 1276,\n 1205,\n 1098,\n 1125,\n 1296,\n 1352,\n 1179,\n 1225,\n 1153,\n 1090,\n 1362,\n 1495,\n 1117,\n 787,\n 627,\n 769,\n 741,\n 1055,\n 1044,\n 1080,\n 1092,\n 1007,\n 945,\n 853,\n 664,\n 542,\n 461,\n 579,\n 584,\n 495,\n 472,\n 420,\n 456,\n 524,\n 584,\n 502,\n 463,\n 460,\n 452,\n 492,\n 465,\n 448,\n 476,\n 399,\n 532,\n 494,\n 464,\n 671,\n 993,\n 473,\n 220,\n 205,\n 220,\n 220,\n 190,\n 237,\n 198,\n 198,\n 225,\n 240,\n 624,\n 2549,\n 2681,\n 1887,\n 1672,\n 1466,\n 1371,\n 1473,\n 1618,\n 2082,\n 1867,\n 1917,\n 1878,\n 1861,\n 1843,\n 2062,\n 1749,\n 1506,\n 1382,\n 1406,\n 1456,\n 1544,\n 1447,\n 1498,\n 1623,\n 1728,\n 2208,\n 2432,\n 2147,\n 1719,\n 1672,\n 1648,\n 1788,\n 2119,\n 2134,\n 3244,\n 3050,\n 3350,\n 3292,\n 2030,\n 1741,\n 1856,\n 1509,\n 1567,\n 1614,\n 1399,\n 1549,\n 1641,\n 2087,\n 3578,\n 3233,\n 2713,\n 3151,\n 1488,\n 1548,\n 1062,\n 1146,\n 1148,\n 1231,\n 1150,\n 1251,\n 1100,\n 1166,\n 1290,\n 1288,\n 1285,\n 1218,\n 1165,\n 1271,\n 1254,\n 1437,\n 1202,\n 897,\n 813,\n 830,\n 816,\n 991,\n 1100,\n 1130,\n 1107,\n 1101,\n 956,\n 999,\n 947,\n 722,\n 569,\n 567,\n 765,\n 671,\n 500,\n 
420,\n 419,\n 435,\n 477,\n 515,\n 482,\n 452,\n 515,\n 512,\n 430,\n 392,\n 431,\n 428,\n 486,\n 464,\n 469,\n 650,\n 1006,\n 762,\n 236,\n 220,\n 193,\n 198,\n 213,\n 222,\n 209,\n 205,\n 212,\n 240,\n 608,\n 2515,\n 2591,\n 1970,\n 1690,\n 1661,\n 1549,\n 1534,\n 1690,\n 2067,\n 1889,\n 1913,\n 1865,\n 1878,\n 2037,\n 1891,\n 1587,\n 1422,\n 1355,\n 1348,\n 1319,\n 1331,\n 1479,\n 1458,\n 1522,\n 1816,\n 2106,\n 2204,\n 2086,\n 1889,\n 1609,\n 1819,\n 1947,\n 1951,\n 1814,\n 3316,\n 3168,\n 3330,\n 3242,\n 1839,\n 1810,\n 1703,\n 1275,\n 1481,\n 1371,\n 1227,\n 1430,\n 1800,\n 2334,\n 3768,\n 2971,\n 2716,\n 3104,\n 1706,\n 1258,\n 1012,\n 934,\n 1051,\n 1017,\n 954,\n 1186,\n 1323,\n 1261,\n 1254,\n 1214,\n 1270,\n 1183,\n 1257,\n 1282,\n 1363,\n 1395,\n 1151,\n 1026,\n 858,\n 930,\n 1038,\n 1164,\n 1127,\n 1222,\n 1252,\n 1193,\n 1017,\n 1044,\n 985,\n 851,\n 686,\n 622,\n 724,\n 583,\n 468,\n 409,\n 395,\n 421,\n 383,\n 471,\n 454,\n 490,\n 543,\n 478,\n 422,\n 385,\n 399,\n 415,\n 441,\n 508,\n 477,\n 644,\n 975,\n 767,\n 214,\n 234,\n 225,\n 201,\n 234,\n 210,\n 209,\n 210,\n 236,\n 239,\n 685,\n 2670,\n 2352,\n 1973,\n 1808,\n 1765,\n 1545,\n 1654,\n 1797,\n 2067,\n 1807,\n 1852,\n 1909,\n 1904,\n 2022,\n 1913,\n 1534,\n 1380,\n 1331,\n 1312,\n 1315,\n 1287,\n 1359,\n 1464,\n 1560,\n 1789,\n 2008,\n 1776,\n 1754,\n 1648,\n 1648,\n 1938,\n 1986,\n 1665,\n 1836,\n 3339,\n 3150,\n 3370,\n 3116,\n 1766,\n 1735,\n 1399,\n 1333,\n 1409,\n 1260,\n 1372,\n 1445,\n 1922,\n 1982,\n 2200,\n 2127,\n 2388,\n 2539,\n 1505,\n 1047,\n 1066,\n 1034,\n 1077,\n 1123,\n 1140,\n 1210,\n 1334,\n 1284,\n 1308,\n 1211,\n 1259,\n 1223,\n 1229,\n 1255,\n 1390,\n 1377,\n 1110,\n 1141,\n 986,\n 1076,\n 1133,\n 1248,\n 1312,\n 1117,\n 1154,\n 1165,\n 1087,\n 1049,\n 1051,\n 970,\n 691,\n 515,\n 463,\n 436,\n 383,\n 415,\n 441,\n 424,\n 424,\n 417,\n 441,\n 432,\n 452,\n 419,\n 410,\n 402,\n 398,\n 453,\n 480,\n 519,\n 565,\n 751,\n 1016,\n 491,\n 210,\n 184,\n 221,\n 189,\n 191,\n 
213,\n 202,\n 193,\n 213,\n 244,\n 308,\n 1613,\n 2869,\n 2140,\n 1909,\n 1782,\n 1746,\n 1721,\n 1851,\n 2102,\n 2061,\n 1846,\n 1980,\n 1993,\n 2271,\n 2087,\n 1634,\n 1300,\n 1249,\n 1361,\n 1313,\n 1293,\n 1338,\n 1352,\n 1370,\n 1591,\n 1794,\n 1781,\n 1792,\n 1918,\n 1847,\n 2089,\n 2110,\n 1756,\n 1657,\n 2931,\n 3280,\n 2970,\n 3446,\n 1932,\n 1674,\n 1378,\n 1338,\n 1508,\n 1470,\n 1570,\n 1737,\n 1772,\n 1644,\n 1877,\n 2147,\n 1865,\n 1988,\n 1477,\n 1160,\n 1094,\n 1120,\n 1109,\n 1249,\n 1336,\n 1414,\n 1200,\n 1329,\n 1310,\n 1245,\n 1214,\n 1232,\n 1198,\n 1252,\n 1318,\n 1430,\n 1077,\n 1094,\n 1200,\n 1171,\n 1277,\n 1256,\n 1245,\n 1162,\n 1113,\n 1018,\n 937,\n 1030,\n 1039,\n 934,\n 921,\n 622,\n 410,\n 405,\n 374,\n 366,\n 371,\n 395,\n 428,\n 412,\n 411,\n 429,\n 431,\n 444,\n 456,\n 420,\n 370,\n 384,\n 414,\n 484,\n 529,\n 703,\n 903,\n 851,\n 246,\n 190,\n 208,\n 207,\n 222,\n 214,\n 210,\n 221,\n 204,\n 236,\n 310,\n 1736,\n 2684,\n 2068,\n 1844,\n 1864,\n 1684,\n 1741,\n 1906,\n 2132,\n 1968,\n 1941,\n 1950,\n 2265,\n 2200,\n 1840,\n 1363,\n 1339,\n 1279,\n 1314,\n 1272,\n 1293,\n 1302,\n 1326,\n 1378,\n 1633,\n 1652,\n 1704,\n 1927,\n 1781,\n 1899,\n 2067,\n 1813,\n 1599,\n 1745,\n 3043,\n 3390,\n 3024,\n 3572,\n 2297,\n 1746,\n 1428,\n 1590,\n 1509,\n 1515,\n 1505,\n 1699,\n 1626,\n 1765,\n 2016,\n 1905,\n 1634,\n 1170,\n 1257,\n 1408,\n 1224,\n 1115,\n 1146,\n 1291,\n 1367,\n 1317,\n 1402,\n 1235,\n 1378,\n 1290,\n 1312,\n 1321,\n 1187,\n 1295,\n 1358,\n 1337,\n 1023,\n 1099,\n 1258,\n 1272,\n 1340,\n 1370,\n 1346,\n 1280,\n 1112,\n 1101,\n 975,\n 1011,\n 1100,\n 965,\n 756,\n 541,\n 422,\n 398,\n 412,\n 369,\n 348,\n 419,\n 468,\n 462,\n 475,\n 509,\n 441,\n 426,\n 405,\n 428,\n 369,\n 382,\n 433,\n 467,\n 490,\n 659,\n 908,\n 787,\n 227,\n 205,\n 193,\n 207,\n 221,\n 215,\n 182,\n 222,\n 230,\n 233,\n 310,\n 1705,\n 2707,\n 2166,\n 1899,\n 1902,\n 1834,\n 1696,\n 1868,\n 2081,\n 2040,\n 2080,\n 2073,\n 2236,\n 2141,\n 1698,\n 1366,\n 
1193,\n 1145,\n 1121,\n 1246,\n 1283,\n 1263,\n 1315,\n 1401,\n 1614,\n 1686,\n 1835,\n 1907,\n 1725,\n 1794,\n 1891,\n 1740,\n 1642,\n 2073,\n 3476,\n 3477,\n 3097,\n 3766,\n 2412,\n 1817,\n 1518,\n 1582,\n 1587,\n 1471,\n 1677,\n 1596,\n 1636,\n 1904,\n 1820,\n 1809,\n 1375,\n 1157,\n 1113,\n 1422,\n 1452,\n 1245,\n 1177,\n 1388,\n 1290,\n 1351,\n 1438,\n 1237,\n 1265,\n 1188,\n 1350,\n 1306,\n 1258,\n 1256,\n 1307,\n 1239,\n 986,\n 1186,\n 1408,\n 1416,\n 1493,\n 1504,\n 1459,\n 1264,\n 1125,\n 1158,\n 1119,\n 1082,\n 891,\n 850,\n 697,\n 571,\n 408,\n 376,\n 414,\n 399,\n 434,\n 541,\n 412,\n 437,\n 476,\n 485,\n 449,\n 403,\n 428,\n 383,\n 396,\n 403,\n 375,\n 486,\n 552,\n 683,\n 1005,\n 639,\n 236,\n 187,\n 214,\n 216,\n 191,\n 217,\n 213,\n 235,\n 219,\n 193,\n 251,\n 1166,\n 2758,\n 2314,\n 2010,\n 1905,\n 1863,\n 1776,\n 1779,\n 2031,\n 2120,\n 2212,\n 2187,\n 2033,\n 1907,\n 1736,\n 1449,\n 1217,\n 1212,\n 1157,\n 1240,\n 1322,\n 1218,\n 1330,\n 1323,\n 1420,\n 1627,\n 1760,\n 2004,\n 1908,\n 1893,\n 1794,\n 2076,\n 1952,\n 1807,\n 2307,\n 2958,\n 2939,\n 3582,\n 2726,\n 1854,\n 1612,\n 1564,\n 1601,\n 1464,\n 1705,\n 1490,\n 1598,\n 1943,\n 1597,\n 1724,\n 1325,\n 1215,\n 1120,\n 1272,\n 1288,\n 1465,\n 1345,\n 1390,\n 1259,\n 1432,\n 1467,\n 1459,\n 1133,\n 1083,\n 1268,\n 1260,\n 1277,\n 1171,\n 1321,\n 1231,\n 1016,\n 1073,\n 1308,\n 1443,\n 1425,\n 1458,\n 1288,\n 1235,\n 1120,\n 1150,\n 1223,\n 1068,\n 950,\n 851,\n 731,\n 591,\n 464,\n 414,\n 355,\n 364,\n 374,\n 449,\n 401,\n 430,\n 415,\n 449,\n 437,\n 403,\n 444,\n 445,\n 374,\n 389,\n 404,\n 439,\n 567,\n 691,\n 975,\n 749,\n 252,\n 225,\n 221,\n 252,\n 191,\n 185,\n 195,\n 240,\n 199,\n 235,\n 245,\n 989,\n 2807,\n 2295,\n 1938,\n 1773,\n 1942,\n 1826,\n 1888,\n 2065,\n 2282,\n 2337,\n 2139,\n 1974,\n 1822,\n 1583,\n 1484,\n 1257,\n 1183,\n 1109,\n 1242,\n 1336,\n 1359,\n 1326,\n 1386,\n 1328,\n 1503,\n 1851,\n 2079,\n 1994,\n 1654,\n 1784,\n 1910,\n 2036,\n 2036,\n 2204,\n 2637,\n 2663,\n 
3059,\n 1925,\n 1685,\n 1420,\n 1338,\n 1292,\n 1534,\n 1709,\n 1530,\n 1705,\n 1584,\n 1634,\n 1628,\n 1467,\n 1444,\n 1318,\n 1330,\n 1279,\n 1326,\n 1398,\n 1401,\n 1311,\n 1448,\n 1537,\n 1527,\n 1150,\n 1081,\n 1203,\n 1245,\n 1128,\n 1181,\n 1367,\n 1164,\n 1073,\n 1133,\n 1308,\n 1453,\n 1403,\n 1381,\n 1298,\n 1302,\n 1204,\n 1125,\n 1137,\n 971,\n 902,\n 791,\n 558,\n 513,\n 362,\n 373,\n 344,\n 415,\n 390,\n 400,\n 407,\n 373,\n 440,\n 463,\n 444,\n 405,\n 408,\n 399,\n 433,\n 448,\n 422,\n 476,\n 601,\n 789,\n 1136,\n 502,\n 226,\n 212,\n 210,\n 200,\n 189,\n 194,\n 221,\n 203,\n 205,\n 234,\n 234,\n 543,\n 2472,\n 2483,\n 1963,\n 1824,\n 1861,\n 1835,\n 1816,\n 2127,\n 2458,\n 2321,\n 2192,\n 1882,\n 1661,\n 1497,\n 1383,\n 1382,\n 1222,\n 1278,\n 1277,\n 1309,\n 1279,\n 1288,\n 1341,\n 1444,\n 1581,\n 1699,\n 1964,\n 1702,\n 1602,\n 1822,\n 2018,\n 2240,\n 2383,\n 2003,\n 2354,\n 1763,\n 2306,\n 1826,\n 1464,\n 1326,\n 1417,\n 1604,\n 1751,\n 1966,\n 1524,\n 1522,\n 1362,\n 1765,\n 1548,\n 1613,\n 1474,\n 1365,\n 1253,\n 1228,\n 1157,\n 1315,\n 1525,\n 1413,\n 1438,\n 1531,\n 1517,\n 1276,\n 1123,\n 1221,\n 1144,\n 1280,\n 1198,\n 1390,\n 1152,\n 1115,\n 1123,\n 1255,\n 1362,\n 1386,\n 1318,\n 1231,\n 1279,\n 1222,\n 1051,\n 1007,\n 881,\n 778,\n 600,\n 603,\n 599,\n 399,\n 386,\n 383,\n 418,\n 376,\n 493,\n 427,\n 387,\n 431,\n 486,\n 450,\n 457,\n 471,\n 425,\n 424,\n 446,\n 489,\n 505,\n 665,\n 825,\n 1260,\n 618,\n 235,\n 207,\n 226,\n 191,\n 218,\n 201,\n 211,\n 184,\n 222,\n 229,\n 216,\n 406,\n 2266,\n 2663,\n 1997,\n 1849,\n 1894,\n 1872,\n 1755,\n 1999,\n 2383,\n 2373,\n 1989,\n 1890,\n 1609,\n 1483,\n 1413,\n 1387,\n 1337,\n 1256,\n 1394,\n 1275,\n 1316,\n 1314,\n 1395,\n 1618,\n 1678,\n 1857,\n 1939,\n 1812,\n 1630,\n 1766,\n 2178,\n 2701,\n 2333,\n 2134,\n 1994,\n 1553,\n 2017,\n 1813,\n 1542,\n 1485,\n 1816,\n 1845,\n 1881,\n 1368,\n 1389,\n 1335,\n 1279,\n 1523,\n 1541,\n 1516,\n 1460,\n 1254,\n 1232,\n 1227,\n 1212,\n 1246,\n 1348,\n 
1500,\n 1353,\n 1324,\n 1395,\n 1288,\n 1303,\n 1244,\n 1210,\n 1209,\n 1206,\n 1445,\n 1127,\n 1049,\n 1043,\n 1156,\n 1285,\n 1105,\n 1202,\n 1150,\n 1133,\n 1065,\n 966,\n 787,\n 682,\n 609,\n 590,\n 600,\n 527,\n 422,\n 349,\n 500,\n 428,\n 426,\n 418,\n 354,\n 433,\n 488,\n 473,\n 507,\n 500,\n 470,\n 403,\n 428,\n 443,\n 475,\n 514,\n 585,\n 886,\n 1148,\n 529,\n 263,\n 208,\n 191,\n 203,\n 193,\n 228,\n 189,\n 208,\n 233,\n 233,\n 220,\n 330,\n 1857,\n 2669,\n 2131,\n 1896,\n 1901,\n 1978,\n 1718,\n 1881,\n 2350,\n 2227,\n 1946,\n 1723,\n 1573,\n 1462,\n 1453,\n 1315,\n 1425,\n 1305,\n 1284,\n 1210,\n 1143,\n 1281,\n 1367,\n 1767,\n 1687,\n 1899,\n 1928,\n 1891,\n 1920,\n 2018,\n 2283,\n 2387,\n 2065,\n 2155,\n 1703,\n 1647,\n 1884,\n 1955,\n 1724,\n 1788,\n 2039,\n 1723,\n 1141,\n 1056,\n 1212,\n 1124,\n 1140,\n 1450,\n 1692,\n 1713,\n 1513,\n 1310,\n 1114,\n 1201,\n 1224,\n 1266,\n 1375,\n 1435,\n 1392,\n 1383,\n 1435,\n 1378,\n 1270,\n 1372,\n 1447,\n 1230,\n 1268,\n 1297,\n 1158,\n 1023,\n 964,\n 1070,\n 1119,\n 1110,\n 1036,\n 953,\n 873,\n 897,\n 884,\n 671,\n 626,\n 638,\n 512,\n 505,\n 452,\n 434,\n 394,\n 389,\n 397,\n 354,\n 421,\n 403,\n 385,\n 491,\n 509,\n 590,\n 485,\n 471,\n 455,\n 445,\n 450,\n 506,\n 498,\n 621,\n 785,\n 1160,\n 562,\n 201,\n 225,\n 231,\n 208,\n 209,\n 217,\n 230,\n 202,\n 200,\n 208,\n 239,\n 287,\n 1752,\n 2742,\n 2053,\n 1902,\n 1890,\n 2057,\n 1995,\n 2121,\n 2186,\n 2091,\n 1904,\n 1820,\n 1638,\n 1479,\n 1433,\n 1509,\n 1378,\n 1467,\n 1110,\n 1088,\n 1143,\n 1247,\n 1320,\n 1446,\n 1645,\n 1693,\n 1612,\n 1627,\n 1907,\n 2099,\n 2295,\n 2199,\n 2211,\n 1970,\n 1659,\n 1957,\n 1835,\n 1962,\n 1863,\n 1972,\n 1750,\n 1112,\n 938,\n 1074,\n 1097,\n 1037,\n 1169,\n 1316,\n 1531,\n 1553,\n 1405,\n 1215,\n 1149,\n 1041,\n 1139,\n 1225,\n 1371,\n 1413,\n 1541,\n 1579,\n 1534,\n 1505,\n 1293,\n 1382,\n 1398,\n 1307,\n 1292,\n 1185,\n 1075,\n 917,\n 930,\n 950,\n 1048,\n 1071,\n 918,\n 867,\n 814,\n 860,\n 670,\n 663,\n 
683,\n 618,\n 551,\n 400,\n 419,\n 422,\n 396,\n 416,\n 445,\n 367,\n 377,\n 369,\n 399,\n 445,\n 516,\n 537,\n 493,\n 460,\n 473,\n 455,\n 473,\n 425,\n 526,\n 596,\n 871,\n 1285,\n 418,\n 215,\n 220,\n 222,\n 218,\n 204,\n 223,\n 192,\n 238,\n 228,\n 208,\n 200,\n 246,\n 956,\n 2931,\n 2363,\n 1874,\n 1742,\n 2110,\n 2257,\n 2155,\n 2012,\n 2050,\n 1914,\n 1849,\n 1707,\n 1523,\n 1548,\n 1372,\n 1447,\n 1357,\n 1293,\n 1061,\n 1100,\n 1212,\n 1337,\n 1473,\n 1752,\n 1668,\n 1566,\n 1539,\n 1655,\n 2006,\n 2315,\n 2097,\n 2293,\n 1995,\n 1961,\n 1985,\n 1746,\n 2038,\n 2092,\n 1902,\n 1189,\n 1057,\n 1035,\n 1083,\n 974,\n 1018,\n 1121,\n 1264,\n 1433,\n 1502,\n 1508,\n 1336,\n 1112,\n 969,\n 994,\n 1206,\n 1225,\n 1314,\n 1384,\n 1544,\n 1580,\n 1453,\n 1430,\n 1439,\n 1381,\n 1306,\n 1322,\n 1148,\n 1056,\n 891,\n 811,\n 889,\n 1087,\n 960,\n 804,\n 839,\n 773,\n 772,\n 636,\n 596,\n 517,\n 494,\n 480,\n 443,\n 412,\n 393,\n 386,\n 450,\n 594,\n 441,\n 450,\n 423,\n 413,\n 477,\n 491,\n 514,\n 511,\n 436,\n 468,\n 454,\n 462,\n 415,\n 473,\n 615,\n 795,\n 1191,\n 566,\n 244,\n 211,\n 204,\n 245,\n 209,\n 218,\n 191,\n 239,\n 198,\n 201,\n 216,\n 247,\n 735,\n 2961,\n 2473,\n 1936,\n 1991,\n 2168,\n 2318,\n 2209,\n 1983,\n 2018,\n 1999,\n 1737,\n 1508,\n 1523,\n 1644,\n 1496,\n 1420,\n 1319,\n 1247,\n 1062,\n 1145,\n 1203,\n 1378,\n 1786,\n 1824,\n 1455,\n 1454,\n 1622,\n 1730,\n 1854,\n 2138,\n 2259,\n 2208,\n 1839,\n 1918,\n 1935,\n 1683,\n 1921,\n 1964,\n 1406,\n 998,\n 928,\n 961,\n 1207,\n 1046,\n 1014,\n 1241,\n 1286,\n 1465,\n 1512,\n 1376,\n 1291,\n 1028,\n 888,\n 957,\n 1119,\n 1126,\n 1287,\n 1442,\n 1623,\n 1601,\n 1453,\n 1408,\n 1378,\n 1288,\n 1222,\n 1251,\n 1182,\n 950,\n 861,\n 651,\n 819,\n 891,\n 797,\n 760,\n 748,\n 621,\n 573,\n 522,\n 482,\n 450,\n 488,\n 424,\n 426,\n 433,\n 415,\n 429,\n 451,\n 566,\n 449,\n 414,\n 455,\n 484,\n 555,\n 579,\n 529,\n 503,\n 414,\n 479,\n 457,\n 487,\n 478,\n 560,\n 592,\n 804,\n 1163,\n 414,\n 220,\n 231,\n 
208,\n 209,\n 203,\n 218,\n 200,\n 202,\n 196,\n 190,\n 199,\n 235,\n 558,\n 2644,\n 2509,\n 1965,\n 2024,\n 2131,\n 2308,\n 2165,\n 2120,\n 2055,\n 1962,\n 1763,\n 1576,\n 1538,\n 1609,\n 1475,\n 1318,\n 1347,\n 1222,\n 1328,\n 1354,\n 1367,\n 1287,\n 1603,\n 1631,\n 1475,\n 1435,\n 1677,\n 1871,\n 1986,\n 2316,\n 2359,\n 2002,\n 2044,\n 1852,\n 1868,\n 1735,\n 1764,\n 1714,\n 1288,\n 1010,\n 908,\n 1078,\n 1223,\n 1111,\n 1080,\n 1220,\n 1276,\n 1423,\n 1613,\n 1361,\n 1355,\n 998,\n 853,\n 1034,\n 969,\n 1029,\n 1298,\n 1511,\n 1637,\n 1592,\n 1536,\n 1551,\n 1297,\n 1291,\n 1168,\n 1114,\n 1116,\n 1057,\n 801,\n 778,\n 754,\n 712,\n 726,\n 631,\n 547,\n 631,\n 568,\n 492,\n 495,\n 501,\n 467,\n 482,\n 500,\n 507,\n 451,\n 427,\n 473,\n 544,\n 602,\n 432,\n 471,\n 512,\n 482,\n 555,\n 469,\n 475,\n 482,\n 468,\n 428,\n 487,\n 447,\n 447,\n 557,\n 914,\n 1227,\n 385,\n 221,\n 191,\n 205,\n 229,\n 204,\n 222,\n 194,\n 214,\n 207,\n 212,\n 213,\n 191,\n 307,\n 1746,\n 2981,\n 2185,\n 1916,\n 2031,\n 2324,\n 2127,\n 1988,\n 1906,\n 1875,\n 1875,\n 1662,\n 1465,\n 1609,\n 1467,\n 1367,\n 1402,\n 1445,\n 1455,\n 1579,\n 1373,\n 1416,\n 1486,\n 1343,\n 1457,\n 1336,\n 1681,\n 1798,\n 1890,\n 2297,\n 2139,\n 1792,\n 1840,\n 1890,\n 1903,\n 1658,\n 1627,\n 1521,\n 1265,\n 1137,\n 994,\n 1128,\n 1081,\n 1032,\n 996,\n 1162,\n 1248,\n 1335,\n 1463,\n 1399,\n 1489,\n 1210,\n 979,\n 961,\n 905,\n 966,\n 1180,\n 1451,\n 1564,\n 1504,\n 1629,\n 1586,\n 1477,\n 1354,\n 1160,\n 1162,\n 992,\n 1048,\n 838,\n 782,\n 745,\n 799,\n 715,\n 648,\n 546,\n 538,\n 482,\n 480,\n 469,\n 534,\n 537,\n 526,\n 447,\n 483,\n 452,\n 399,\n 609,\n 598,\n 583,\n 510,\n 454,\n 444,\n 475,\n 565,\n 555,\n 479,\n 419,\n 420,\n 448,\n 448,\n 448,\n 483,\n 616,\n 837,\n 1233,\n 411,\n 211,\n 205,\n 221,\n 218,\n 206,\n 213,\n 202,\n 236,\n 224,\n 233,\n 228,\n 197,\n 272,\n 1378,\n 3138,\n 2363,\n 1973,\n 2113,\n 2274,\n 2082,\n 1927,\n 2017,\n 1923,\n 1772,\n 1673,\n 1516,\n 1504,\n 1490,\n 1422,\n 
1458,\n 1527,\n 1601,\n 1624,\n 1543,\n 1424,\n 1464,\n 1527,\n 1336,\n 1516,\n 1676,\n 1723,\n 1836,\n 2150,\n 1966,\n 1595,\n 1569,\n 1901,\n 1694,\n 1629,\n 1457,\n 1472,\n 1206,\n 976,\n 986,\n 1064,\n 1008,\n 868,\n 1009,\n 1155,\n 1187,\n 1393,\n 1519,\n 1452,\n 1466,\n 1277,\n 1161,\n 1051,\n 1044,\n 1080,\n 1364,\n 1586,\n 1640,\n 1491,\n 1641,\n 1612,\n 1399,\n 1240,\n 1146,\n 1098,\n 961,\n 1011,\n 833,\n 748,\n 636,\n 762,\n 711,\n 620,\n 610,\n 527,\n 525,\n 522,\n 504,\n 564,\n 757,\n 504,\n 425,\n 478,\n 436,\n 432,\n 513,\n 457,\n 450,\n 429,\n 461,\n 402,\n 496,\n 518,\n 509,\n 409,\n 462,\n 448,\n 446,\n 485,\n 468,\n 493,\n 589,\n 882,\n 1119,\n 364,\n 214,\n 196,\n 194,\n 208,\n 209,\n 205,\n 225,\n 213,\n 200,\n 217,\n 204,\n 229,\n 242,\n 883,\n 3024,\n 2298,\n 1959,\n 2093,\n 2281,\n 2077,\n 1882,\n 1958,\n 1908,\n 1712,\n 1718,\n 1637,\n 1510,\n 1460,\n 1328,\n 1430,\n 1650,\n 1654,\n 1500,\n 1333,\n 1465,\n 1577,\n 1479,\n 1481,\n 1696,\n 1641,\n 1687,\n 1847,\n 2018,\n 1530,\n 1403,\n 1461,\n 1588,\n 1546,\n 1449,\n 1380,\n 1405,\n 1143,\n 1057,\n 1105,\n 1061,\n 802,\n 894,\n 1016,\n 1057,\n 1168,\n 1281,\n 1438,\n 1419,\n 1280,\n 1038,\n 1084,\n 1167,\n 1148,\n 1246,\n 1406,\n 1532,\n 1619,\n 1460,\n 1477,\n 1552,\n 1301,\n 1242,\n 1117,\n 1070,\n 949,\n 1036,\n 790,\n 741,\n 709,\n 815,\n 676,\n 615,\n 571,\n 615,\n 476,\n 470,\n 499,\n 515,\n 486,\n 491,\n 434,\n 477,\n 467,\n 470,\n 422,\n 462,\n 453,\n 546,\n 479,\n 419,\n 449,\n 494,\n 452,\n 482,\n 455,\n 451,\n 430,\n 474,\n 469,\n 504,\n 722,\n 985,\n 1048,\n 315,\n 216,\n 203,\n 211,\n 198,\n 221,\n 203,\n 211,\n 199,\n 190,\n 209,\n 238,\n 203,\n 235,\n 688,\n 2792,\n 2517,\n 1965,\n 1950,\n 2198,\n 2097,\n 1898,\n 2004,\n 1808,\n 1701,\n 1630,\n 1599,\n 1436,\n 1370,\n 1289,\n 1275,\n 1769,\n 1700,\n 1464,\n 1308,\n 1554,\n 1427,\n 1573,\n 1654,\n 1749,\n 1589,\n 1609,\n 1814,\n 1641,\n 1437,\n 1363,\n 1430,\n 1409,\n 1423,\n 1455,\n 1231,\n 1368,\n 1148,\n 1157,\n 1069,\n 
920,\n 699,\n 723,\n 923,\n 1192,\n 1097,\n 1337,\n 1471,\n 1302,\n 1264,\n 971,\n 862,\n 1167,\n 1203,\n 1180,\n 1390,\n 1531,\n 1463,\n 1403,\n 1361,\n 1545,\n 1318,\n 1164,\n 1201,\n 962,\n 971,\n 957,\n 789,\n 684,\n 691,\n 834,\n 725,\n 596,\n 618,\n 507,\n 542,\n 449,\n 543,\n 645,\n 609,\n 480,\n 438,\n 463,\n 545,\n 548,\n 507,\n 469,\n 547,\n 631,\n 511,\n 452,\n 493,\n 605,\n 477,\n 462,\n 452,\n 442,\n 412,\n 439,\n 460,\n 543,\n 707,\n 1018,\n 939,\n 305,\n 231,\n 205,\n 222,\n 191,\n 204,\n 179,\n 222,\n 184,\n 210,\n 209,\n 208,\n 212,\n 229,\n 496,\n 2507,\n 2709,\n 2032,\n 1897,\n 2177,\n 2044,\n 1921,\n 1875,\n 1832,\n 1754,\n 1641,\n 1564,\n 1422,\n 1413,\n 1252,\n 1402,\n 1815,\n 1582,\n 1480,\n 1512,\n 1528,\n 1507,\n 1598,\n 1672,\n 1583,\n 1493,\n 1485,\n 1642,\n 1497,\n 1445,\n 1321,\n 1351,\n 1339,\n 1301,\n 1164,\n 1160,\n 1184,\n 1041,\n 1042,\n 837,\n 840,\n 838,\n 761,\n 1081,\n 1284,\n 1294,\n 1455,\n 1531,\n 1455,\n 1298,\n 986,\n 908,\n 1050,\n 1169,\n 1199,\n 1348,\n 1411,\n 1400,\n 1366,\n 1451,\n 1504,\n 1282,\n 1200,\n 1172,\n 996,\n 1036,\n 950,\n 846,\n 751,\n 719,\n 875,\n 590,\n 566,\n 577,\n 530,\n 441,\n 513,\n 566,\n 1051,\n 678,\n 524,\n 446,\n 559,\n 599,\n 473,\n 486,\n 519,\n 481,\n 548,\n 405,\n 446,\n 498,\n 534,\n 466,\n 419,\n 400,\n 402,\n 431,\n 446,\n 468,\n 553,\n 760,\n 1154,\n 791,\n 265,\n 227,\n 188,\n 212,\n 191,\n 209,\n 201,\n 204,\n 218,\n 199,\n 185,\n 210,\n 214,\n 195,\n 270,\n 1374,\n 2918,\n 2267,\n 1933,\n 2145,\n 2015,\n 1908,\n 1884,\n 1820,\n 1572,\n 1573,\n 1590,\n 1468,\n 1432,\n 1337,\n 1403,\n 1895,\n 1618,\n 1589,\n 1597,\n 1715,\n 1613,\n 1770,\n 1615,\n 1457,\n 1485,\n 1556,\n 1572,\n 1346,\n 1347,\n 1280,\n 1230,\n 1194,\n 1207,\n 1031,\n 985,\n 1057,\n 1097,\n 819,\n 828,\n 829,\n 868,\n 843,\n 997,\n 1197,\n 1393,\n 1321,\n 1353,\n 1318,\n 1255,\n 1059,\n 920,\n 993,\n 1123,\n 1207,\n 1291,\n 1229,\n 1312,\n 1274,\n 1391,\n 1569,\n 1355,\n 1125,\n 1220,\n 1012,\n 990,\n 981,\n 882,\n 
767,\n 746,\n 838,\n 726,\n 632,\n 520,\n 597,\n 557,\n 493,\n 464,\n 564,\n 487,\n 448,\n 510,\n 492,\n 474,\n 552,\n 409,\n 404,\n 430,\n 490,\n 415,\n 446,\n 620,\n 562,\n 440,\n 440,\n 422,\n 453,\n 424,\n 416,\n 462,\n 573,\n 748,\n 1098,\n 906,\n 269,\n 199,\n 217,\n 216,\n 190,\n 214,\n 216,\n 235,\n 194,\n 226,\n 213,\n 207,\n 213,\n 233,\n 273,\n 1155,\n 2946,\n 2427,\n 2039,\n 2188,\n 2126,\n 1865,\n 1737,\n 1685,\n 1554,\n 1547,\n 1476,\n 1464,\n 1438,\n 1389,\n 1475,\n 1671,\n 1645,\n 1732,\n 1783,\n 1676,\n 2004,\n 1688,\n 1666,\n 1496,\n 1523,\n 1502,\n 1377,\n 1297,\n 1273,\n 1283,\n 1159,\n 1144,\n 941,\n 935,\n 833,\n 907,\n 824,\n 755,\n 715,\n 810,\n 877,\n 965,\n 1086,\n 1225,\n 1356,\n 1427,\n 1302,\n 1304,\n 1270,\n 1210,\n 920,\n 1027,\n 1090,\n 1126,\n 1247,\n 1301,\n 1245,\n 1321,\n 1319,\n 1402,\n 1266,\n 1211,\n 1025,\n 904,\n 997,\n 1025,\n 764,\n 851,\n 834,\n 764,\n 670,\n 585,\n 547,\n 541,\n 480,\n 484,\n 544,\n 482,\n 486,\n 469,\n 528,\n 505,\n 526,\n 520,\n 446,\n 480,\n 537,\n 444,\n 495,\n 571,\n 559,\n 460,\n 457,\n 401,\n 399,\n 452,\n 439,\n 445,\n 469,\n 603,\n 856,\n 1254,\n 609,\n 256,\n 208,\n 190,\n 223,\n 188,\n 211,\n 224,\n 182,\n 219,\n 187,\n 213,\n 180,\n 206,\n 258,\n 225,\n 518,\n 2515,\n 2796,\n 1990,\n 2155,\n 2253,\n 1828,\n 1752,\n 1720,\n 1751,\n 1506,\n 1419,\n 1437,\n 1356,\n 1398,\n 1371,\n 1597,\n 1452,\n 1599,\n 1729,\n 1814,\n 1752,\n 1704,\n 1577,\n 1510,\n 1569,\n 1410,\n 1255,\n 1225,\n 1263,\n 1232,\n 1151,\n 1152,\n 1043,\n 897,\n 841,\n 884,\n 861,\n 907,\n 860,\n 1005,\n 927,\n 982,\n 1145,\n 1230,\n 1324,\n 1225,\n 1294,\n 1197,\n 1309,\n 1209,\n 954,\n 1005,\n 1096,\n 1067,\n 1140,\n 1276,\n 1324,\n 1418,\n 1392,\n 1463,\n 1262,\n 1199,\n 1078,\n 904,\n 1036,\n 971,\n 848,\n 788,\n 804,\n 768,\n 684,\n 598,\n 526,\n 560,\n 540,\n 564,\n 520,\n 511,\n 536,\n 449,\n 521,\n 477,\n 508,\n 526,\n 441,\n 709,\n 568,\n 530,\n 651,\n 641,\n 483,\n 513,\n 406,\n 448,\n 417,\n 459,\n 441,\n 413,\n 
478,\n 594,\n 805,\n 1152,\n 485,\n 208,\n 237,\n 216,\n 212,\n 223,\n 198,\n 232,\n 212,\n 216,\n 229,\n 177,\n 200,\n 195,\n 215,\n 237,\n 312,\n 1603,\n 2932,\n 2263,\n 2086,\n 2323,\n 1998,\n 1633,\n 1613,\n 1723,\n 1588,\n 1358,\n 1295,\n 1351,\n 1423,\n 1271,\n 1411,\n 1480,\n 1542,\n 1711,\n 1886,\n 1741,\n 1523,\n 1508,\n 1498,\n 1542,\n 1434,\n 1320,\n 1301,\n 1239,\n 1208,\n 1099,\n 1028,\n 937,\n 879,\n 857,\n 931,\n 1048,\n 1040,\n 1147,\n 1132,\n 1140,\n 1076,\n 1387,\n 1452,\n 1398,\n 1204,\n 1227,\n 1205,\n 1110,\n 987,\n 933,\n 957,\n 1161,\n 984,\n 1245,\n 1254,\n 1304,\n 1322,\n 1388,\n 1306,\n 1257,\n 1218,\n 978,\n 953,\n 1041,\n 983,\n 763,\n 783,\n 802,\n 777,\n 643,\n 497,\n 493,\n 522,\n 550,\n 600,\n 489,\n 509,\n 457,\n 488,\n 518,\n 498,\n 452,\n 433,\n 484,\n 525,\n 483,\n 575,\n 613,\n 564,\n 502,\n 390,\n 446,\n 412,\n 410,\n 448,\n 469,\n 411,\n 487,\n 622,\n 851,\n 1085,\n 348,\n 211,\n 221,\n 213,\n 205,\n 193,\n 212,\n 208,\n 217,\n 226,\n 226,\n 214,\n 202,\n 173,\n 198,\n 204,\n 237,\n 753,\n 2643,\n 2526,\n 2089,\n 2295,\n 2006,\n 1658,\n 1610,\n 1618,\n 1538,\n 1485,\n 1364,\n 1305,\n 1371,\n 1163,\n 1337,\n 1472,\n 1484,\n 1709,\n 1849,\n 1670,\n 1477,\n 1386,\n 1433,\n 1416,\n 1468,\n 1449,\n 1441,\n 1396,\n 1270,\n 1058,\n 1051,\n 929,\n 948,\n 882,\n 976,\n 1002,\n 1106,\n 1212,\n 1234,\n 1307,\n 1301,\n 1400,\n 1338,\n 1227,\n 1106,\n 1170,\n 1043,\n 1052,\n 961,\n 890,\n 918,\n 969,\n 1043,\n 1220,\n 1310,\n 1238,\n 1326,\n 1219,\n 1341,\n 1281,\n 1220,\n 935,\n 933,\n 972,\n 962,\n 812,\n 786,\n 841,\n 834,\n 661,\n 534,\n 593,\n 607,\n 593,\n 569,\n 475,\n 530,\n 463,\n 469,\n 507,\n 472,\n 492,\n 473,\n 473,\n 457,\n 464,\n 631,\n 593,\n 575,\n 452,\n 469,\n 438,\n 401,\n 404,\n 432,\n 395,\n 391,\n 492,\n 576,\n 788,\n 1008,\n 334,\n 251,\n 215,\n 223,\n 212,\n 196,\n 207,\n 218,\n 197,\n 212,\n 204,\n 219,\n 191,\n 208,\n 193,\n 230,\n 217,\n 442,\n 2109,\n 2907,\n 2155,\n 2408,\n 2036,\n 1703,\n 1600,\n 1606,\n 
1574,\n 1502,\n 1437,\n 1365,\n 1220,\n 989,\n 1245,\n 1425,\n 1377,\n 1741,\n 1818,\n 1614,\n 1367,\n 1459,\n 1434,\n 1342,\n 1460,\n 1465,\n 1364,\n 1408,\n 1251,\n 1227,\n 1191,\n 1099,\n 980,\n 912,\n 1077,\n 1115,\n 1180,\n 1189,\n 1232,\n 1333,\n 1539,\n 1386,\n 1367,\n 1321,\n 1038,\n 1113,\n 1015,\n 1080,\n 911,\n 842,\n 869,\n 1052,\n 1094,\n 1208,\n 1347,\n 1209,\n 1201,\n 1262,\n 1265,\n 1226,\n 1194,\n 891,\n 902,\n 951,\n 908,\n 722,\n 692,\n 776,\n 809,\n 667,\n 660,\n 705,\n 599,\n 538,\n 554,\n 524,\n 523,\n 524,\n 492,\n 486,\n 459,\n 529,\n 503,\n 499,\n 410,\n 475,\n 536,\n 521,\n 537,\n 465,\n 387,\n 383,\n 436,\n 460,\n 429,\n 421,\n 418,\n 417,\n 614,\n 915,\n 919,\n 239,\n 212,\n 201,\n 218,\n 206,\n 204,\n 225,\n 200,\n 212,\n 213,\n 199,\n 212,\n 194,\n 193,\n 212,\n 191,\n 198,\n 266,\n 1403,\n 2895,\n 2567,\n 2316,\n 2321,\n 1811,\n 1601,\n 1638,\n 1694,\n 1562,\n 1431,\n 1341,\n 1273,\n 1249,\n 1362,\n 1414,\n 1487,\n 1766,\n 1861,\n 1553,\n 1500,\n 1595,\n 1484,\n 1340,\n 1349,\n 1380,\n 1360,\n 1373,\n 1303,\n 1222,\n 1115,\n 1064,\n 1037,\n 950,\n 963,\n 1118,\n 1112,\n 1120,\n 1162,\n 1373,\n 1392,\n 1361,\n 1369,\n 1165,\n 1072,\n 1088,\n 1093,\n 1026,\n 909,\n 856,\n 858,\n 976,\n 1078,\n 1145,\n 1261,\n 1244,\n 1236,\n 1325,\n 1436,\n 1380,\n 1145,\n 979,\n 834,\n 956,\n 876,\n 776,\n 813,\n 758,\n 699,\n 662,\n 650,\n 686,\n 583,\n 569,\n 506,\n 492,\n 497,\n 555,\n 440,\n 479,\n 501,\n 502,\n 540,\n 424,\n 492,\n 512,\n 545,\n 531,\n 568,\n 471,\n 434,\n 397,\n 447,\n 407,\n 462,\n 448,\n 472,\n 513,\n 679,\n 1015,\n 697,\n 211,\n 212,\n 218,\n 219,\n 175,\n 226,\n 188,\n 210,\n 211,\n 223,\n 226,\n 231,\n 230,\n 226,\n 220,\n 209,\n 205,\n 222,\n 693,\n 2270,\n 2790,\n 2517,\n 2274,\n 1840,\n 1519,\n 1634,\n 1579,\n 1497,\n 1404,\n 1317,\n 1200,\n 1194,\n 1249,\n 1444,\n 1458,\n 1645,\n 1673,\n 1368,\n 1496,\n 1532,\n 1447,\n 1389,\n 1209,\n 1283,\n 1332,\n 1435,\n 1483,\n 1227,\n 1182,\n 1188,\n 1064,\n 968,\n 987,\n 1090,\n 
1130,\n 1183,\n 1247,\n 1421,\n 1371,\n 1166,\n 1047,\n 1056,\n 931,\n 886,\n 964,\n 1051,\n 974,\n 880,\n 882,\n 1001,\n 967,\n 1122,\n 1159,\n 1301,\n 1249,\n 1267,\n 1334,\n 1370,\n 1112,\n 961,\n 874,\n 908,\n 920,\n 823,\n 761,\n 770,\n 767,\n 749,\n 601,\n 569,\n 574,\n 498,\n 563,\n 462,\n 512,\n 522,\n 442,\n 481,\n 469,\n 428,\n 409,\n 512,\n 464,\n 450,\n 487,\n 442,\n 469,\n 457,\n 415,\n 384,\n 422,\n 439,\n 466,\n 446,\n 423,\n 546,\n 642,\n 995,\n 663,\n 239,\n 209,\n 227,\n 217,\n 215,\n 207,\n 199,\n 221,\n 196,\n 216,\n 199,\n 204,\n 224,\n 207,\n 217,\n 181,\n 201,\n 210,\n 392,\n 1822,\n 2904,\n 2628,\n 2281,\n 1813,\n 1587,\n 1567,\n 1546,\n 1552,\n 1416,\n 1307,\n 1302,\n 1302,\n 1305,\n 1391,\n 1327,\n 1544,\n 1399,\n 1383,\n 1355,\n 1404,\n 1421,\n 1358,\n 1278,\n 1292,\n 1329,\n 1458,\n 1236,\n 1224,\n 1061,\n 1118,\n 1151,\n 1222,\n 1378,\n 1347,\n 1266,\n 1255,\n 1424,\n 1323,\n 1088,\n 996,\n 911,\n 913,\n 910,\n 831,\n 981,\n 1077,\n 1002,\n 910,\n 869,\n 1078,\n 1100,\n 1067,\n 1172,\n 1231,\n 1283,\n 1328,\n 1365,\n 1249,\n 1147,\n 902,\n 916,\n 1034,\n 942,\n 877,\n 769,\n 745,\n 794,\n 708,\n 584,\n 533,\n 515,\n 507,\n 475,\n 501,\n 495,\n 466,\n 502,\n 454,\n 479,\n 455,\n 506,\n 471,\n 474,\n 512,\n 582,\n 622,\n 564,\n 450,\n 423,\n 410,\n 418,\n 432,\n 511,\n 485,\n 498,\n 570,\n 748,\n 1016,\n 387,\n 227,\n 174,\n 214,\n 198,\n 239,\n 213,\n 205,\n 178,\n 205,\n 198,\n 198,\n 220,\n 203,\n 196,\n 205,\n 202,\n 223,\n 203,\n 308,\n 1217,\n 2571,\n 2865,\n 2647,\n 1919,\n 1609,\n 1503,\n 1667,\n 1489,\n 1565,\n 1426,\n 1280,\n 1304,\n 1290,\n 1372,\n 1411,\n 1402,\n 1345,\n 1303,\n 1385,\n 1321,\n 1486,\n 1348,\n 1343,\n 1287,\n 1332,\n 1310,\n 1252,\n 1193,\n 1133,\n 1180,\n 1171,\n 1202,\n 1396,\n 1332,\n 1274,\n 1258,\n 1217,\n 991,\n 939,\n 861,\n 904,\n 755,\n 820,\n 875,\n 920,\n 976,\n 932,\n 890,\n 907,\n 1038,\n 1117,\n 1158,\n 1167,\n 1233,\n 1331,\n 1340,\n 1341,\n 1274,\n 1170,\n 964,\n 933,\n 1062,\n 1000,\n 800,\n 
755,\n 793,\n 782,\n 740,\n 561,\n 557,\n 521,\n 484,\n 476,\n 484,\n 471,\n 579,\n 595,\n 477,\n 472,\n 474,\n 418,\n 508,\n 493,\n 492,\n 530,\n 540,\n 445,\n 424,\n 444,\n 431,\n 478,\n 405,\n 521,\n 555,\n 559,\n 661,\n 864,\n 955,\n 306,\n 198,\n 209,\n 204,\n 184,\n 222,\n 191,\n 201,\n 201,\n 204,\n 211,\n 234,\n 178,\n 198,\n 212,\n 201,\n 225,\n 202,\n 237,\n 237,\n 492,\n 1916,\n 2798,\n 2868,\n 2264,\n 1671,\n 1532,\n 1655,\n 1605,\n 1414,\n 1488,\n 1336,\n 1303,\n 1271,\n 1278,\n 1368,\n 1322,\n 1395,\n 1339,\n 1278,\n 1322,\n 1230,\n 1188,\n 1436,\n 1356,\n 1341,\n 1315,\n 1266,\n 1138,\n 1164,\n 1180,\n 1129,\n 1082,\n 1144,\n 1225,\n 1092,\n 1118,\n 1067,\n 990,\n 919,\n 889,\n 814,\n 765,\n 844,\n 795,\n 867,\n 829,\n 816,\n 867,\n 843,\n 1002,\n 1062,\n 1031,\n 1130,\n 1172,\n 1193,\n 1313,\n 1282,\n 1356,\n 1179,\n 935,\n 909,\n 955,\n 1003,\n 832,\n 745,\n 763,\n 763,\n 701,\n 632,\n 538,\n 553,\n 572,\n 514,\n 485,\n 456,\n 514,\n 524,\n 474,\n 452,\n 499,\n 452,\n 496,\n 491,\n 506,\n 557,\n 457,\n 430,\n 424,\n 389,\n 466,\n 464,\n 408,\n 491,\n 519,\n 584,\n 689,\n 940,\n 915,\n 259,\n 216,\n 183,\n 192,\n 212,\n 226,\n 200,\n 186,\n 224,\n 196,\n 203,\n 210,\n 203,\n 176,\n 218,\n 214,\n 248,\n 255,\n 258,\n 297,\n 387,\n 1308,\n 2228,\n 3077,\n 2608,\n 1870,\n 1495,\n 1624,\n 1605,\n 1492,\n 1529,\n 1497,\n 1412,\n 1071,\n 1244,\n 1321,\n 1346,\n 1386,\n 1240,\n 1244,\n 1190,\n 1150,\n 1219,\n 1350,\n 1334,\n 1344,\n 1245,\n 1295,\n 1253,\n 1176,\n 1166,\n 1146,\n 1102,\n 1073,\n 1195,\n 1101,\n 1150,\n 1125,\n 926,\n 924,\n 929,\n 803,\n 817,\n 796,\n 806,\n 833,\n 832,\n 859,\n 845,\n 920,\n 950,\n 1121,\n 1085,\n 1200,\n 1237,\n 1366,\n 1208,\n 1186,\n 1346,\n 1282,\n 980,\n 913,\n 925,\n 967,\n 877,\n 824,\n 714,\n 781,\n 672,\n 585,\n 557,\n 550,\n 495,\n 489,\n 522,\n 479,\n 389,\n 468,\n 447,\n 496,\n 489,\n 530,\n 505,\n 510,\n 513,\n 594,\n 439,\n 381,\n 448,\n 405,\n 413,\n 435,\n 472,\n 479,\n 518,\n 611,\n 725,\n 927,\n 865,\n 
287,\n 205,\n 200,\n 207,\n 188,\n 211,\n 223,\n 231,\n 216,\n 211,\n 200,\n 209,\n 216,\n 219,\n 255,\n 384,\n 711,\n 1236,\n 1968,\n 2087,\n 1856,\n 1871,\n 2937,\n 4204,\n 3945,\n 2336,\n 1740,\n 1685,\n 1632,\n 1570,\n 1511,\n 1483,\n 1458,\n 1177,\n 1189,\n 1240,\n 1435,\n 1408,\n 1294,\n 1202,\n 1225,\n 1296,\n 1338,\n 1304,\n 1285,\n 1252,\n 1237,\n 1198,\n 1150,\n 1125,\n 1207,\n 1086,\n 1115,\n 1204,\n 1145,\n 1034,\n 1139,\n 1101,\n 1029,\n 958,\n 881,\n 764,\n 809,\n 823,\n 797,\n 844,\n 874,\n 873,\n 889,\n 911,\n 1010,\n 1002,\n 1031,\n 1194,\n 1328,\n 1297,\n 1285,\n 1309,\n 1356,\n 1138,\n 924,\n 941,\n 983,\n 878,\n 757,\n 764,\n 735,\n 675,\n 622,\n 601,\n 527,\n 580,\n 531,\n 551,\n 527,\n 465,\n 426,\n 494,\n 471,\n 485,\n 498,\n 540,\n 497,\n 556,\n 551,\n 475,\n 468,\n 470,\n 453,\n 445,\n 437,\n 416,\n 441,\n 467,\n 498,\n 544,\n 634,\n 904,\n 358,\n 192,\n 196,\n 189,\n 208,\n 203,\n 206,\n 194,\n 210,\n 246,\n 187,\n 218,\n 204,\n 224,\n 490,\n 1336,\n 2137,\n 2027,\n 1360,\n 1218,\n 1079,\n 775,\n 607,\n 1308,\n 2634,\n 4279,\n 3956,\n 2924,\n 2865,\n 2857,\n 2317,\n 1903,\n 1687,\n 1579,\n 1343,\n 1034,\n 1264,\n 1347,\n 1403,\n 1273,\n 1300,\n 1159,\n 1230,\n 1290,\n 1339,\n 1224,\n 1209,\n 1237,\n 1165,\n 1159,\n 1210,\n 1180,\n 1222,\n 1232,\n 1152,\n 1210,\n 1080,\n 1107,\n 1220,\n 1292,\n 1062,\n 819,\n 856,\n 899,\n 831,\n 808,\n 867,\n 869,\n 777,\n 929,\n 960,\n 947,\n 1013,\n 1065,\n 1121,\n 1226,\n 1234,\n 1310,\n 1249,\n 1285,\n 1145,\n 905,\n 797,\n 954,\n 854,\n 811,\n 764,\n 724,\n 680,\n 718,\n 542,\n 509,\n 553,\n 541,\n 492,\n 511,\n 513,\n 495,\n 527,\n 439,\n 496,\n 485,\n 517,\n 528,\n 437,\n 486,\n 511,\n 474,\n 494,\n 463,\n 446,\n 443,\n 450,\n 426,\n 433,\n 481,\n 485,\n 551,\n 482,\n 246,\n 201,\n 208,\n 181,\n 183,\n 213,\n 185,\n 203,\n 203,\n 212,\n 209,\n 207,\n 247,\n 909,\n 1809,\n 1255,\n 952,\n 798,\n 854,\n 793,\n 648,\n 542,\n 389,\n 658,\n 2384,\n 3653,\n 4136,\n 2766,\n 2262,\n 2125,\n 2065,\n 2244,\n 
2244,\n 2229,\n 2045,\n 1587,\n 1340,\n 1554,\n 1570,\n 1362,\n 1311,\n 1173,\n 1306,\n 1369,\n 1311,\n 1186,\n 1264,\n 1110,\n 1204,\n 1137,\n 1101,\n 1228,\n 1221,\n 1222,\n 1247,\n 1213,\n 1142,\n 1335,\n 1186,\n 1135,\n 1124,\n 980,\n 927,\n 889,\n 865,\n 841,\n 863,\n 864,\n 842,\n 827,\n 960,\n 941,\n 1059,\n 1081,\n 1096,\n 1158,\n 1190,\n 1193,\n 1293,\n 1399,\n 1096,\n 879,\n 854,\n 987,\n 940,\n 875,\n 749,\n 697,\n 691,\n 646,\n 585,\n 514,\n 568,\n 557,\n 518,\n 481,\n 515,\n 563,\n 470,\n 528,\n 561,\n 577,\n 562,\n 564,\n 482,\n 503,\n 572,\n 497,\n 404,\n 429,\n 438,\n 376,\n 395,\n 418,\n 453,\n 563,\n 520,\n 674,\n 598,\n 260,\n 177,\n 212,\n 188,\n 227,\n 197,\n 189,\n 219,\n 198,\n 190,\n 187,\n 299,\n 835,\n 1027,\n 822,\n 525,\n 397,\n 337,\n 288,\n 302,\n 247,\n 245,\n 295,\n 762,\n 1715,\n 3255,\n 4113,\n 3342,\n 2343,\n 2053,\n 1823,\n 1867,\n 1956,\n 1936,\n 1792,\n 1649,\n 1511,\n 1743,\n 2046,\n 1935,\n 1703,\n 1654,\n 1618,\n 1544,\n 1425,\n 1407,\n 1333,\n 1142,\n 1155,\n 1168,\n 1149,\n 1102,\n 1064,\n 1209,\n 1284,\n 1130,\n 1165,\n 1113,\n 1117,\n 1116,\n 1101,\n 1007,\n 970,\n 942,\n 966,\n 986,\n 963,\n 896,\n 841,\n 839,\n 883,\n 885,\n 1045,\n 1066,\n 1101,\n 1172,\n 1277,\n 1234,\n 1217,\n 1322,\n 1139,\n 907,\n 867,\n 900,\n 1017,\n 794,\n 713,\n 647,\n 706,\n 616,\n 533,\n 603,\n 537,\n 486,\n 520,\n 560,\n 618,\n 640,\n 697,\n 728,\n 654,\n 744,\n 702,\n 683,\n 633,\n 525,\n 568,\n 477,\n 390,\n 420,\n 410,\n 350,\n 382,\n 387,\n 397,\n 502,\n 566,\n 695,\n 677,\n 266,\n 203,\n 207,\n 223,\n 203,\n 217,\n 171,\n 217,\n 210,\n 192,\n 195,\n 755,\n 522,\n 335,\n 253,\n 255,\n 214,\n 257,\n 216,\n 230,\n 229,\n 243,\n 543,\n 1025,\n 1447,\n 2843,\n 3592,\n 3671,\n 2647,\n 2032,\n 1726,\n 1775,\n 1833,\n 1883,\n 1885,\n 1809,\n 1568,\n 1579,\n 1866,\n 1688,\n 1763,\n 1681,\n 1607,\n 1567,\n 1479,\n 1439,\n 1366,\n 1275,\n 1412,\n 1352,\n 1211,\n 1086,\n 1075,\n 1118,\n 1064,\n 1029,\n 994,\n 1090,\n 1153,\n 1102,\n 1057,\n 
1024,\n 928,\n 1012,\n 1027,\n 1054,\n 992,\n 842,\n 859,\n 850,\n 877,\n 1009,\n 1042,\n 1142,\n 1114,\n 1228,\n 1369,\n 1319,\n 1208,\n 1326,\n 1114,\n 865,\n 790,\n 928,\n 1053,\n 780,\n 739,\n 700,\n 737,\n 664,\n 534,\n 525,\n 552,\n 603,\n 694,\n 701,\n 786,\n 842,\n 789,\n 843,\n 863,\n 743,\n 732,\n 686,\n 650,\n 620,\n 507,\n 408,\n 448,\n 368,\n 337,\n 339,\n 393,\n 373,\n 415,\n 427,\n 500,\n 658,\n 417,\n 218,\n 201,\n 208,\n 206,\n 220,\n 220,\n 223,\n 209,\n 216,\n 166,\n 222,\n 283,\n 240,\n 210,\n 204,\n 214,\n 190,\n 235,\n 200,\n 190,\n 240,\n 324,\n 1101,\n 1232,\n 1299,\n 1729,\n 2845,\n 3699,\n 3183,\n 2209,\n 1794,\n 1733,\n 1725,\n 1786,\n 1795,\n 1691,\n 1717,\n 1736,\n 1801,\n 1734,\n 1724,\n 1665,\n 1605,\n 1508,\n 1339,\n 1398,\n 1273,\n 1177,\n 1205,\n 1412,\n 1295,\n 1205,\n 1123,\n 1065,\n 1138,\n 1011,\n 960,\n 998,\n 982,\n 1096,\n 1019,\n 963,\n 1007,\n 1001,\n 1110,\n 1152,\n 1078,\n 929,\n 810,\n 837,\n 790,\n 915,\n 1059,\n 1157,\n 1119,\n 1253,\n 1320,\n 1435,\n 1315,\n 1307,\n 1148,\n 901,\n 834,\n 900,\n 961,\n 730,\n 706,\n 732,\n 787,\n 630,\n 598,\n 523,\n 620,\n 700,\n 863,\n 868,\n 903,\n 912,\n 893,\n 953,\n 878,\n 896,\n 728,\n 693,\n 613,\n 670,\n 461,\n 407,\n 385,\n 370,\n 324,\n 342,\n 326,\n 367,\n 349,\n 432,\n 460,\n 630,\n 341,\n 202,\n 209,\n 214,\n 180,\n 212,\n 213,\n 215,\n 183,\n 194,\n 199,\n 196,\n 191,\n 207,\n 200,\n 198,\n 202,\n 198,\n 222,\n 222,\n 204,\n 429,\n 1154,\n 1403,\n 1130,\n 980,\n 1056,\n 2106,\n 2993,\n 3633,\n 2475,\n 1976,\n 1680,\n 1656,\n 1788,\n 1801,\n 1825,\n 1742,\n 1851,\n 1814,\n 1716,\n 1822,\n 1685,\n 1431,\n 1298,\n 1202,\n 1287,\n 1311,\n 1246,\n 1195,\n 1259,\n 1149,\n 1176,\n 1139,\n 1258,\n 1148,\n 1101,\n 1054,\n 1024,\n 978,\n 1068,\n 1005,\n 1144,\n 924,\n 961,\n 1027,\n 1064,\n 1020,\n 973,\n 888,\n 857,\n 891,\n 1046,\n 1027,\n 1140,\n 1196,\n 1277,\n 1243,\n 1355,\n 1370,\n 1254,\n 1066,\n 952,\n 842,\n 895,\n 949,\n 822,\n 696,\n 788,\n 680,\n 656,\n 548,\n 655,\n 
855,\n 952,\n 955,\n 1014,\n 1074,\n 1048,\n 1047,\n 998,\n 920,\n 822,\n 770,\n 669,\n 621,\n 645,\n 534,\n 415,\n 342,\n 338,\n 367,\n 341,\n 358,\n 354,\n 360,\n 364,\n 453,\n 522,\n 239,\n 213,\n 198,\n 210,\n 209,\n 216,\n 187,\n 219,\n 215,\n 216,\n 214,\n 241,\n 210,\n 195,\n 205,\n 198,\n 194,\n 188,\n 200,\n 208,\n 235,\n 411,\n 889,\n 1726,\n 1049,\n 853,\n 691,\n 993,\n 2327,\n 3338,\n 3507,\n 2441,\n 1958,\n 1752,\n 1706,\n 1707,\n 1739,\n 1787,\n 1779,\n 1780,\n 1714,\n 1650,\n 1519,\n 1359,\n 1230,\n 1217,\n 1245,\n 1202,\n 1184,\n 1221,\n 1186,\n 1179,\n 1077,\n 1084,\n 1165,\n 1132,\n 1012,\n 970,\n 919,\n 929,\n 1000,\n 1036,\n 1049,\n 964,\n 882,\n 944,\n 1003,\n 1100,\n 914,\n 963,\n 973,\n 881,\n 889,\n 1049,\n 1045,\n 1234,\n 1348,\n 1237,\n 1318,\n 1453,\n 1310,\n 1209,\n 881,\n 909,\n 850,\n 908,\n 697,\n 721,\n 718,\n 679,\n 630,\n 601,\n 682,\n 968,\n 1332,\n 1159,\n 1112,\n 1145,\n 1123,\n 1165,\n 1075,\n 983,\n 966,\n 877,\n 697,\n 600,\n 547,\n 471,\n 385,\n 328,\n 411,\n 351,\n 381,\n 349,\n 348,\n 355,\n 342,\n 453,\n 497,\n 220,\n 212,\n 196,\n 208,\n 178,\n 211,\n 206,\n 218,\n 206,\n 207,\n 208,\n 194,\n 186,\n 199,\n 203,\n 197,\n 208,\n 202,\n 187,\n 210,\n 193,\n 265,\n 496,\n 1497,\n 1289,\n 843,\n 716,\n 680,\n 1388,\n 2673,\n 3608,\n 3251,\n 2318,\n 2000,\n 1814,\n 1772,\n 1845,\n 1812,\n 1799,\n 1948,\n 1666,\n 1522,\n 1508,\n 1401,\n 1317,\n 1299,\n 1285,\n 1282,\n 1274,\n 1170,\n 1301,\n 1146,\n 1156,\n 1110,\n 1107,\n 1076,\n 1007,\n 927,\n 899,\n 926,\n 952,\n 977,\n 981,\n 934,\n 995,\n 955,\n 970,\n 887,\n 832,\n 933,\n 987,\n 923,\n 895,\n 895,\n 1078,\n 1257,\n 1349,\n 1229,\n 1315,\n 1256,\n 1301,\n 1293,\n 964,\n 768,\n 835,\n 923,\n 795,\n 700,\n 685,\n 692,\n 688,\n 544,\n 811,\n 1042,\n 1204,\n 1263,\n 1243,\n 1276,\n 1277,\n 1176,\n 1099,\n 1005,\n 935,\n 825,\n 716,\n 586,\n 515,\n 472,\n 344,\n 333,\n 333,\n 354,\n 356,\n 332,\n 354,\n 354,\n 344,\n 478,\n 332,\n 229,\n 219,\n 185,\n 201,\n 146,\n 182,\n 
216,\n 200,\n 206,\n 177,\n 187,\n 217,\n 180,\n 184,\n 226,\n 221,\n 208,\n 230,\n 199,\n 212,\n 226,\n 215,\n 287,\n 442,\n 938,\n 1218,\n 864,\n 735,\n 682,\n 1656,\n 2949,\n 3614,\n 3101,\n 2248,\n 1887,\n 1842,\n 1724,\n 1728,\n 1815,\n 1877,\n 1897,\n 1615,\n 1402,\n 1428,\n 1337,\n 1341,\n 1334,\n 1266,\n 1182,\n 1271,\n 1225,\n 1203,\n 1166,\n 1113,\n 1164,\n 1049,\n 1031,\n 885,\n 780,\n 942,\n 969,\n 929,\n 941,\n 1022,\n 1034,\n 998,\n 948,\n 943,\n 971,\n 952,\n 972,\n 925,\n 804,\n 926,\n 1023,\n 1220,\n 1285,\n 1293,\n 1277,\n 1276,\n 1210,\n 1338,\n 1018,\n 874,\n 864,\n 924,\n 804,\n 697,\n 734,\n 640,\n 541,\n 634,\n 862,\n 1072,\n 1254,\n 1231,\n 1247,\n 1289,\n 1224,\n 1105,\n 1091,\n 1001,\n 914,\n 835,\n 616,\n 575,\n 510,\n 432,\n 375,\n 332,\n 313,\n 328,\n 351,\n 317,\n 338,\n 377,\n 395,\n 473,\n 331,\n 210,\n 205,\n 186,\n 181,\n 224,\n 229,\n 209,\n 194,\n 187,\n 232,\n 195,\n 205,\n 199,\n 194,\n 223,\n 216,\n 225,\n 200,\n 206,\n 213,\n 216,\n 211,\n 216,\n 262,\n 375,\n 696,\n 1236,\n 1160,\n 827,\n 1119,\n 2456,\n 3284,\n 3507,\n 2760,\n 2231,\n 1997,\n 1672,\n 1623,\n 1652,\n 1726,\n 1718,\n 1704,\n 1440,\n 1361,\n 1311,\n 1344,\n 1308,\n 1238,\n 1173,\n 1223,\n 1248,\n 1196,\n 1212,\n 1209,\n 1117,\n 1019,\n 774,\n 664,\n 778,\n 934,\n 967,\n 964,\n 957,\n 1001,\n 943,\n 883,\n 876,\n 923,\n 1089,\n 1045,\n 983,\n 924,\n 910,\n 956,\n 1071,\n 1167,\n 1271,\n 1221,\n 1217,\n 1192,\n 1229,\n 1251,\n 894,\n 826,\n 861,\n 832,\n 792,\n 684,\n 759,\n 662,\n 670,\n 867,\n 1142,\n 1345,\n 1284,\n 1255,\n 1227,\n 1090,\n 1061,\n 1069,\n 1094,\n 956,\n 894,\n 755,\n 656,\n 482,\n 481,\n 347,\n 361,\n 321,\n 324,\n 334,\n 302,\n 310,\n 370,\n 391,\n 404,\n 464,\n 232,\n 201,\n 192,\n 206,\n 200,\n 186,\n 202,\n 197,\n 195,\n 215,\n 217,\n 207,\n 222,\n 221,\n 208,\n 210,\n 193,\n 214,\n 211,\n 200,\n 223,\n 192,\n 202,\n 216,\n 214,\n 240,\n 332,\n 378,\n 372,\n 300,\n 471,\n 1336,\n 2393,\n 3487,\n 3510,\n 2721,\n 2216,\n 1910,\n 1780,\n 
1680,\n 1718,\n 1619,\n 1600,\n 1480,\n 1337,\n 1209,\n 1379,\n 1354,\n 1353,\n 1253,\n 1233,\n 1300,\n 1269,\n 1167,\n 1106,\n 1115,\n 1052,\n 936,\n 766,\n 744,\n 785,\n 894,\n 942,\n 972,\n 958,\n 917,\n 1011,\n 894,\n 854,\n 1021,\n 985,\n 963,\n 956,\n 959,\n 1028,\n 1000,\n 1150,\n 1276,\n 1236,\n 1146,\n 1140,\n 1314,\n 1332,\n 1014,\n 898,\n 803,\n 906,\n 770,\n 715,\n 747,\n 723,\n 683,\n 910,\n 1133,\n 1232,\n 1169,\n 1219,\n 1219,\n 1114,\n 1165,\n 1076,\n 993,\n 849,\n 756,\n 625,\n 589,\n 495,\n 435,\n 347,\n 303,\n 347,\n 322,\n 325,\n 298,\n 349,\n 342,\n 382,\n 425,\n 433,\n 215,\n 206,\n 199,\n 227,\n 206,\n 167,\n 187,\n 202,\n 186,\n 236,\n 192,\n 187,\n 213,\n 218,\n 195,\n 191,\n 204,\n 212,\n 192,\n 197,\n 191,\n 233,\n 223,\n 211,\n 199,\n 202,\n 230,\n 238,\n 210,\n 207,\n 314,\n 521,\n 1750,\n 2824,\n 3841,\n 3125,\n 2366,\n 1991,\n 1702,\n 1614,\n 1485,\n 1474,\n 1371,\n 1479,\n 1423,\n 1380,\n 1383,\n 1437,\n 1377,\n 1408,\n 1411,\n 1360,\n 1264,\n 1263,\n 1285,\n 1214,\n 1088,\n 1041,\n 979,\n 950,\n 859,\n 893,\n 1003,\n 978,\n 909,\n 906,\n 902,\n 825,\n 841,\n 988,\n 956,\n 1000,\n 1033,\n 1000,\n 920,\n 971,\n 1081,\n 1101,\n 1222,\n 1136,\n 1184,\n 1184,\n 1181,\n 1051,\n 825,\n 872,\n 937,\n 781,\n 717,\n 685,\n 646,\n 618,\n 1025,\n 1191,\n 1284,\n 1170,\n 1084,\n 1129,\n 1141,\n 1084,\n 1087,\n 893,\n 843,\n 653,\n 701,\n 512,\n 404,\n 333,\n 315,\n 316,\n 348,\n 340,\n 350,\n 332,\n 365,\n 347,\n 394,\n 507,\n 315,\n 201,\n 209,\n 217,\n 225,\n 205,\n 214,\n 219,\n 192,\n 200,\n 220,\n 197,\n 204,\n 191,\n 213,\n 239,\n 193,\n 190,\n 204,\n 216,\n 212,\n 192,\n 199,\n 214,\n 197,\n 208,\n 182,\n 207,\n 212,\n 210,\n 220,\n 200,\n 271,\n 663,\n 1826,\n 2979,\n 3830,\n 2923,\n 2258,\n 1817,\n 1700,\n 1548,\n 1285,\n 1276,\n 1335,\n 1394,\n 1501,\n 1396,\n 1426,\n 1438,\n 1452,\n 1499,\n 1605,\n 1550,\n 1389,\n 1403,\n 1113,\n 1208,\n 1177,\n 1185,\n 1122,\n 1035,\n 1115,\n 1089,\n 985,\n 908,\n 867,\n 820,\n 870,\n 933,\n 947,\n 
1126,\n 1108,\n 1021,\n 948,\n 970,\n 1009,\n 998,\n 972,\n 1189,\n 1163,\n 1223,\n 1202,\n 1168,\n 1021,\n 840,\n 843,\n 902,\n 817,\n 721,\n 722,\n 675,\n 745,\n 941,\n 1139,\n 1227,\n 1290,\n 1178,\n 1061,\n 1026,\n 954,\n 985,\n 900,\n 685,\n 575,\n 509,\n 409,\n 392,\n 383,\n 319,\n 313,\n 294,\n 331,\n 329,\n 349,\n 353,\n 376,\n 424,\n 565,\n 294,\n 222,\n 210,\n 208,\n 219,\n 215,\n 221,\n 208,\n 221,\n 186,\n 188,\n 174,\n 204,\n 208,\n 202,\n 201,\n 237,\n 213,\n 200,\n 201,\n 210,\n 218,\n 208,\n 220,\n 198,\n 195,\n 210,\n 195,\n 195,\n 213,\n 209,\n 229,\n 379,\n 485,\n 1075,\n 2262,\n 3294,\n 3772,\n 2727,\n 2185,\n 1766,\n 1610,\n 1268,\n 1228,\n 1273,\n 1206,\n 1284,\n 1492,\n 1423,\n 1457,\n 1365,\n 1469,\n 1728,\n 1681,\n 1427,\n 1398,\n 1455,\n 1371,\n 1187,\n 1143,\n 1190,\n 1118,\n 1088,\n 1067,\n 1014,\n 957,\n 903,\n 920,\n 946,\n 894,\n 990,\n 1145,\n 1118,\n 1009,\n 909,\n 948,\n 1063,\n 1161,\n 1010,\n 1037,\n 1202,\n 1183,\n 1265,\n 1301,\n 1046,\n 902,\n 793,\n 955,\n 797,\n 682,\n 690,\n 687,\n 692,\n 924,\n 1025,\n 1071,\n 1156,\n 1098,\n 889,\n 1078,\n 993,\n 971,\n 795,\n 615,\n 491,\n 436,\n 370,\n 362,\n 363,\n 296,\n 289,\n 339,\n 325,\n 308,\n 342,\n 342,\n 432,\n 491,\n 559,\n 229,\n 211,\n 226,\n 195,\n 179,\n 207,\n 198,\n 183,\n 181,\n 203,\n 211,\n 186,\n 212,\n 212,\n 206,\n 210,\n 203,\n 209,\n 207,\n 234,\n 204,\n 205,\n 217,\n 197,\n 219,\n 209,\n 195,\n 211,\n 181,\n 220,\n 212,\n 203,\n 262,\n 426,\n 554,\n 1193,\n 2463,\n 3440,\n 3541,\n 2425,\n 1972,\n 1681,\n 1486,\n 1278,\n 1223,\n 1190,\n 1329,\n 1373,\n 1363,\n 1381,\n 1439,\n 1445,\n 1485,\n 1524,\n 1547,\n 1321,\n 1438,\n 1429,\n 1391,\n 1370,\n 1260,\n 1294,\n 1192,\n 1234,\n 957,\n 986,\n 991,\n 1002,\n 1055,\n 1018,\n 1083,\n 1084,\n 1028,\n 955,\n 981,\n 909,\n 1084,\n 1180,\n 1119,\n 1156,\n 1161,\n 1225,\n 1222,\n 1197,\n 1062,\n 836,\n 795,\n 891,\n 884,\n 732,\n 724,\n 729,\n 693,\n 715,\n 934,\n 970,\n 1040,\n 1090,\n 964,\n 983,\n 918,\n 926,\n 719,\n 
574,\n 454,\n 449,\n 403,\n 363,\n 358,\n 352,\n 331,\n 343,\n 379,\n 364,\n 365,\n 415,\n 375,\n 552,\n 465,\n 240,\n 224,\n 181,\n 194,\n 212,\n 204,\n 189,\n 222,\n 186,\n 206,\n 192,\n 189,\n 199,\n 197,\n 198,\n 240,\n 202,\n 190,\n 204,\n 194,\n 216,\n 193,\n 220,\n 217,\n 211,\n 191,\n 186,\n 211,\n 195,\n 235,\n 218,\n 210,\n 278,\n 366,\n 669,\n 783,\n 1588,\n 2789,\n 3865,\n 3000,\n 2192,\n 1787,\n 1548,\n 1382,\n 1249,\n 1305,\n 1246,\n 1270,\n 1321,\n 1324,\n 1406,\n 1477,\n 1568,\n 1442,\n 1463,\n 1422,\n 1311,\n 1414,\n 1378,\n 1391,\n 1439,\n 1313,\n 1237,\n 1234,\n 1171,\n 1012,\n 1028,\n 1049,\n 971,\n 1012,\n 1068,\n 1038,\n 988,\n 953,\n 877,\n 996,\n 1043,\n 1142,\n 1152,\n 1151,\n 1273,\n 1183,\n 1080,\n 1134,\n 1055,\n 881,\n 871,\n 968,\n 808,\n 713,\n 734,\n 722,\n 621,\n 671,\n 876,\n 952,\n 1110,\n 958,\n 842,\n 957,\n 943,\n 865,\n 673,\n 480,\n 414,\n 448,\n 340,\n 406,\n 390,\n 330,\n 331,\n 401,\n 372,\n 316,\n 384,\n 374,\n 465,\n 591,\n 306,\n 208,\n 213,\n 216,\n 186,\n 200,\n 207,\n 200,\n 223,\n 184,\n 180,\n 223,\n 213,\n 205,\n 196,\n 186,\n 214,\n 221,\n 222,\n 202,\n 181,\n 192,\n 206,\n 194,\n 204,\n 204,\n 187,\n 183,\n 193,\n 215,\n 202,\n 209,\n 215,\n 214,\n 323,\n 612,\n 654,\n 815,\n 1687,\n 2999,\n 3826,\n 2858,\n 2077,\n 1628,\n 1473,\n 1313,\n 1476,\n 1430,\n 1302,\n 1316,\n 1432,\n 1431,\n 1535,\n 1567,\n 1610,\n 1658,\n 1460,\n 1448,\n 1198,\n 1269,\n 1297,\n 1449,\n 1400,\n 1266,\n 1159,\n 1182,\n 1132,\n 1168,\n 1174,\n 1212,\n 1076,\n 1051,\n 1005,\n 1054,\n 975,\n 998,\n 1055,\n 1010,\n 1105,\n 1058,\n 1219,\n 1183,\n 1169,\n 1082,\n 1099,\n 1060,\n 895,\n 836,\n 942,\n 782,\n 712,\n 698,\n 705,\n 591,\n 579,\n 629,\n 877,\n 969,\n 1014,\n 908,\n 892,\n 748,\n 665,\n 498,\n 471,\n 430,\n 428,\n 377,\n 412,\n 373,\n 368,\n 394,\n 395,\n 399,\n 404,\n 421,\n 428,\n 516,\n 588,\n 237,\n 202,\n 202,\n 202,\n 197,\n 210,\n 208,\n 184,\n 202,\n 212,\n 178,\n 216,\n 214,\n 216,\n 192,\n 207,\n 224,\n 211,\n 198,\n 
221,\n 212,\n 180,\n 198,\n 213,\n 200,\n 212,\n 176,\n 181,\n 211,\n 192,\n 222,\n 214,\n 197,\n 241,\n 293,\n 355,\n 736,\n 682,\n 803,\n 1545,\n 2867,\n 3795,\n 2779,\n 1919,\n 1540,\n 1420,\n 1387,\n 1573,\n 1507,\n 1385,\n 1385,\n 1333,\n 1391,\n 1425,\n 1448,\n 1454,\n 1384,\n 1360,\n 1275,\n 1217,\n 1302,\n 1320,\n 1327,\n 1361,\n 1246,\n 1269,\n 1248,\n 1271,\n 1164,\n 1254,\n 1120,\n 1027,\n 961,\n 1080,\n 1064,\n 990,\n 1002,\n 1078,\n 1198,\n 1164,\n 1206,\n 1187,\n 1205,\n 1110,\n 1171,\n 1001,\n 966,\n 876,\n 1004,\n 890,\n 732,\n 685,\n 657,\n 618,\n 627,\n 592,\n 623,\n 668,\n 785,\n 900,\n 777,\n 642,\n 527,\n 455,\n 456,\n 443,\n 424,\n 399,\n 399,\n 367,\n 353,\n 461,\n 428,\n 402,\n 403,\n 438,\n 417,\n 558,\n 710,\n 303,\n 218,\n 185,\n 211,\n 217,\n 183,\n 226,\n 203,\n 204,\n 202,\n 187,\n 181,\n 227,\n 193,\n 182,\n 189,\n 202,\n 186,\n 204,\n 200,\n 191,\n 197,\n 196,\n 216,\n 215,\n 196,\n 221,\n 212,\n 196,\n 190,\n 180,\n 184,\n 207,\n 198,\n 243,\n 387,\n 607,\n 790,\n 627,\n 928,\n 2080,\n 3269,\n 3381,\n 2235,\n 1727,\n 1628,\n 1495,\n 1511,\n 1616,\n 1546,\n 1424,\n 1328,\n 1371,\n 1311,\n 1350,\n 1419,\n 1384,\n 1427,\n 1408,\n 1290,\n 1278,\n 1371,\n 1274,\n 1197,\n 1276,\n 1274,\n 1246,\n 1092,\n 1053,\n 1132,\n 1069,\n 966,\n 988,\n 954,\n 1032,\n 1004,\n 1108,\n 1193,\n 1145,\n 1140,\n 1212,\n 1102,\n 1172,\n 1103,\n 1068,\n 1056,\n 945,\n 824,\n 998,\n 880,\n 840,\n 686,\n 628,\n 603,\n 531,\n 580,\n 549,\n 556,\n 630,\n 666,\n 674,\n 528,\n 424,\n 457,\n 435,\n 480,\n 447,\n 402,\n 399,\n 373,\n 427,\n 398,\n 472,\n 490,\n 469,\n 472,\n 504,\n 740,\n 590,\n 251,\n 193,\n 195,\n 222,\n 176,\n 210,\n 193,\n 187,\n 224,\n 177,\n 204,\n 207,\n 200,\n 219,\n 218,\n 198,\n 210,\n 233,\n 205,\n 215,\n 199,\n 208,\n 225,\n 203,\n 194,\n 217,\n 201,\n 216,\n 212,\n 199,\n 197,\n 220,\n 214,\n 242,\n 232,\n 282,\n 501,\n 928,\n 679,\n 637,\n 1079,\n 2529,\n 3498,\n 2943,\n 1939,\n 1663,\n 1498,\n 1599,\n 1593,\n 1692,\n 1568,\n 1459,\n 
1389,\n 1430,\n 1321,\n 1305,\n 1728,\n 2614,\n 1400,\n 1335,\n 1298,\n 1383,\n 1212,\n 1272,\n 1302,\n 1285,\n 1284,\n 1237,\n 1137,\n 1071,\n 1104,\n 1202,\n 1063,\n 990,\n 1081,\n 1149,\n 1199,\n 1093,\n 1166,\n 1051,\n 1099,\n 1139,\n 1173,\n 1091,\n 1060,\n 1054,\n 895,\n 928,\n 961,\n 860,\n 738,\n 653,\n 653,\n 616,\n 655,\n 580,\n 587,\n 604,\n 557,\n 539,\n 480,\n 478,\n 467,\n 418,\n 450,\n 402,\n 472,\n 419,\n 399,\n 423,\n 455,\n 468,\n 511,\n 512,\n 479,\n 517,\n 569,\n 893,\n 319,\n 233,\n 209,\n 205,\n 195,\n 213,\n 209,\n 202,\n 197,\n 227,\n 205,\n 204,\n 209,\n 220,\n 196,\n 221,\n 191,\n 201,\n 201,\n 229,\n 177,\n 219,\n 226,\n 197,\n 204,\n 186,\n 215,\n 186,\n 222,\n 211,\n 196,\n 196,\n 191,\n 211,\n 208,\n 217,\n 245,\n 383,\n 731,\n 840,\n 779,\n 692,\n 1081,\n 2446,\n 3473,\n 2943,\n 2101,\n 1781,\n 1652,\n 1653,\n 1636,\n 1700,\n 1669,\n 1502,\n 1503,\n 1313,\n 1292,\n 1423,\n 1424,\n 1390,\n 1259,\n 1303,\n 1362,\n 1268,\n 1329,\n 1275,\n 1275,\n 1301,\n 1256,\n 1218,\n 1214,\n 1098,\n 1134,\n 1107,\n 1077,\n 1035,\n 1159,\n 1101,\n 1166,\n 1056,\n 1087,\n 1037,\n 1163,\n 1117,\n 1033,\n 965,\n 996,\n 951,\n 920,\n 923,\n 926,\n 707,\n 655,\n 599,\n 622,\n 622,\n 584,\n 666,\n 572,\n 506,\n 522,\n 485,\n 508,\n 494,\n 527,\n 473,\n 419,\n 506,\n 409,\n 408,\n 421,\n 448,\n 472,\n 522,\n 529,\n 540,\n 518,\n 628,\n 825,\n 333,\n 208,\n 208,\n 202,\n 204,\n 216,\n 203,\n 187,\n 185,\n 198,\n 170,\n 234,\n 168,\n 223,\n 221,\n 200,\n 223,\n 194,\n 209,\n 201,\n 194,\n 183,\n 190,\n 215,\n 200,\n 174,\n 203,\n 199,\n 200,\n 201,\n 190,\n 201,\n 197,\n 200,\n 184,\n 204,\n 210,\n 300,\n 439,\n 856,\n 968,\n 822,\n 752,\n 1300,\n 2585,\n 3396,\n 2863,\n 1997,\n 1732,\n 1722,\n 1673,\n 1712,\n 1628,\n 1632,\n 1491,\n 1461,\n 1408,\n 1465,\n 1557,\n 1396,\n 1381,\n 1213,\n 1246,\n 1387,\n 1382,\n 1294,\n 1210,\n 1228,\n 1257,\n 1286,\n 1129,\n 1189,\n 1121,\n 1064,\n 1047,\n 1107,\n 1037,\n 1123,\n 1189,\n 1091,\n 1092,\n 1122,\n 1080,\n 1068,\n 
1034,\n 1016,\n 957,\n 933,\n 847,\n 965,\n 988,\n 830,\n 650,\n 648,\n 592,\n 619,\n 614,\n 546,\n 599,\n 482,\n 570,\n 508,\n 421,\n 429,\n 527,\n 516,\n 473,\n 448,\n 452,\n 421,\n 394,\n 447,\n 496,\n 554,\n 588,\n 548,\n 535,\n 715,\n 807,\n 240,\n 224,\n 211,\n 214,\n 211,\n 193,\n 178,\n 189,\n 194,\n 204,\n 212,\n 223,\n 205,\n 200,\n 201,\n 177,\n 215,\n 202,\n 230,\n 206,\n 215,\n 222,\n 205,\n 201,\n 213,\n 176,\n 189,\n 207,\n 205,\n 206,\n 188,\n 205,\n 198,\n 219,\n 236,\n 195,\n 212,\n 211,\n 435,\n 565,\n 909,\n 1115,\n 999,\n 876,\n 1543,\n 2567,\n 3448,\n 2825,\n 1973,\n 1822,\n 1650,\n 1715,\n 1610,\n 1638,\n 1608,\n 1372,\n 1535,\n 1548,\n 1452,\n 1299,\n 1331,\n 1239,\n 1385,\n 1336,\n 1281,\n 1259,\n 1261,\n 1286,\n 1284,\n 1235,\n 1167,\n 1121,\n 1120,\n 1118,\n 1108,\n 1009,\n 1236,\n 1117,\n 1062,\n 1038,\n 1057,\n 1102,\n 1010,\n 1002,\n 966,\n 1027,\n 1034,\n 971,\n 940,\n 989,\n 1033,\n 790,\n 615,\n 588,\n 615,\n 598,\n 602,\n 590,\n 547,\n 614,\n 492,\n 502,\n 476,\n 541,\n 507,\n 518,\n 462,\n 464,\n 471,\n 495,\n 443,\n 534,\n 520,\n 538,\n 534,\n 554,\n 541,\n 891,\n 525,\n 236,\n 218,\n 175,\n 198,\n 219,\n 216,\n 219,\n 202,\n 203,\n 182,\n 211,\n 210,\n 207,\n 194,\n 172,\n 203,\n 216,\n 190,\n 207,\n 217,\n 235,\n 194,\n 196,\n 192,\n 211,\n 203,\n 203,\n 208,\n 197,\n 187,\n 188,\n 193,\n 188,\n 216,\n 217,\n 178,\n 184,\n 210,\n 242,\n 379,\n 487,\n 896,\n 824,\n 759,\n 699,\n 1276,\n 2439,\n 3421,\n 2824,\n 1914,\n 1760,\n 1791,\n 1699,\n 1711,\n 1648,\n 1663,\n 1580,\n 1526,\n 1574,\n 1383,\n 1453,\n 1490,\n 1386,\n 1284,\n 1293,\n 1129,\n 1120,\n 1281,\n 1286,\n 1179,\n 1035,\n 1093,\n 1160,\n 1188,\n 1189,\n 1151,\n 1127,\n 1169,\n 1094,\n 1042,\n 1242,\n 1146,\n 946,\n 945,\n 957,\n 978,\n 1073,\n 973,\n 863,\n 954,\n 993,\n 799,\n 684,\n 659,\n 601,\n 662,\n 610,\n 564,\n 616,\n 604,\n 548,\n 553,\n 498,\n 485,\n 495,\n 502,\n 495,\n 481,\n 481,\n 497,\n 517,\n 595,\n 587,\n 586,\n 584,\n 619,\n 623,\n 952,\n 393,\n 
189,\n 218,\n 212,\n 207,\n 186,\n 181,\n 216,\n 192,\n 188,\n 195,\n 203,\n 203,\n 193,\n 203,\n 198,\n 178,\n 202,\n 234,\n 198,\n 204,\n 205,\n 189,\n 229,\n 190,\n 208,\n 187,\n 202,\n 216,\n 224,\n 200,\n 191,\n 202,\n 192,\n 201,\n 233,\n 216,\n 212,\n 187,\n 187,\n 233,\n 316,\n 486,\n 361,\n 411,\n 485,\n 758,\n 1517,\n 2504,\n 3308,\n 2743,\n 1967,\n 1809,\n 1734,\n 1656,\n 1732,\n 1736,\n 1663,\n 1569,\n 1576,\n 1700,\n 1735,\n 2176,\n 1491,\n 1346,\n 1263,\n 1260,\n 1150,\n 1282,\n 1246,\n 1107,\n 1207,\n 1122,\n 1073,\n 1245,\n 1260,\n 1229,\n 1378,\n 2080,\n 1285,\n 1208,\n 1221,\n 1013,\n 915,\n 838,\n 898,\n 960,\n 965,\n 928,\n 897,\n 954,\n 997,\n 835,\n 736,\n 698,\n 650,\n 644,\n 604,\n 692,\n 636,\n 608,\n 597,\n 549,\n 535,\n 538,\n 525,\n 498,\n 522,\n 442,\n 531,\n 525,\n 558,\n 626,\n 598,\n 601,\n 555,\n 576,\n 680,\n 911,\n 329,\n 197,\n 197,\n 198,\n 189,\n 201,\n 206,\n 199,\n 180,\n 212,\n 203,\n 192,\n 204,\n 198,\n 207,\n 189,\n 216,\n 225,\n 205,\n 173,\n 210,\n 199,\n 200,\n 223,\n 210,\n 201,\n 199,\n 231,\n 203,\n 212,\n 182,\n 206,\n 218,\n 208,\n 211,\n 187,\n 202,\n 213,\n 208,\n 217,\n 213,\n 238,\n 254,\n 265,\n 231,\n 348,\n 665,\n 928,\n 1417,\n 2615,\n 3304,\n 2707,\n 1934,\n 1835,\n 1757,\n 1721,\n 1813,\n 1840,\n 1701,\n 1649,\n 1727,\n 2023,\n 1644,\n 1336,\n 1367,\n 1351,\n 1271,\n 1135,\n 1230,\n 1167,\n 1246,\n 1252,\n 1213,\n 1130,\n 1169,\n 1150,\n 1107,\n 1092,\n 1131,\n 1107,\n 1001,\n 1017,\n 1008,\n 829,\n 835,\n 903,\n 848,\n 946,\n 956,\n 974,\n 986,\n 959,\n 879,\n 788,\n 720,\n 712,\n 803,\n 808,\n 675,\n 681,\n 645,\n 555,\n 566,\n 547,\n 515,\n 535,\n 482,\n 498,\n 514,\n 524,\n 572,\n 598,\n 668,\n 655,\n 611,\n 638,\n 593,\n 736,\n 857,\n 308,\n 210,\n 228,\n 189,\n 194,\n 194,\n 153,\n 182,\n 194,\n 192,\n 222,\n 191,\n 177,\n 201,\n 191,\n 198,\n 202,\n 190,\n 195,\n 200,\n 215,\n 209,\n 188,\n 222,\n 183,\n 201,\n 218,\n 189,\n 195,\n 178,\n 223,\n 201,\n 219,\n 198,\n 204,\n 197,\n 204,\n 213,\n 
184,\n 192,\n 205,\n 197,\n 190,\n 189,\n 202,\n 256,\n 459,\n 903,\n 989,\n 1612,\n 2490,\n 3246,\n 2760,\n 1973,\n 1777,\n 1814,\n 1750,\n 1711,\n 1776,\n 1726,\n 1787,\n 2322,\n 1832,\n 1471,\n 1473,\n 1372,\n 1342,\n 1399,\n 1483,\n 1381,\n 1245,\n 1251,\n 1249,\n 1126,\n 1197,\n 1048,\n 1101,\n 1482,\n 1293,\n 1078,\n 968,\n 946,\n 854,\n 850,\n 855,\n 858,\n 915,\n 933,\n 911,\n 942,\n 1017,\n 959,\n 892,\n 811,\n 800,\n 934,\n 865,\n 822,\n 789,\n 718,\n 639,\n 605,\n 542,\n 550,\n 529,\n 538,\n 607,\n 579,\n 585,\n 649,\n 691,\n 634,\n 648,\n 642,\n 669,\n 614,\n 622,\n 978,\n 658,\n 265,\n 193,\n 231,\n 198,\n 218,\n 182,\n 183,\n 196,\n 196,\n 175,\n 206,\n 208,\n 218,\n 196,\n 215,\n 172,\n 213,\n 193,\n 201,\n 190,\n 185,\n 198,\n 196,\n 197,\n 216,\n 220,\n 186,\n 213,\n 210,\n 219,\n 214,\n 199,\n 184,\n 205,\n 209,\n 202,\n 196,\n 196,\n 195,\n 205,\n 213,\n 196,\n 186,\n 208,\n 213,\n 214,\n 207,\n 310,\n 692,\n 924,\n 1677,\n 2346,\n 3056,\n 2745,\n 1976,\n 1853,\n 1881,\n 1760,\n 1677,\n 1784,\n 1707,\n 1788,\n 2056,\n 1886,\n 1460,\n 1411,\n 1299,\n 1316,\n 1347,\n 1402,\n 1449,\n 1250,\n 1157,\n 1260,\n 1106,\n 1061,\n 1165,\n 1193,\n 1161,\n 1011,\n 972,\n 848,\n 881,\n 823,\n 843,\n 824,\n 906,\n 958,\n 1004,\n 1043,\n 1082,\n 1000,\n 974,\n 863,\n 865,\n 982,\n 890,\n 845,\n 774,\n 682,\n 683,\n 638,\n 618,\n 604,\n 646,\n 614,\n 651,\n 664,\n 697,\n 711,\n 759,\n 705,\n 724,\n 737,\n 682,\n 628,\n 746,\n 1137,\n 519,\n 212,\n 196,\n 209,\n 196,\n 195,\n 195,\n 171,\n 206,\n 200,\n 187,\n 240,\n 197,\n 186,\n 192,\n 188,\n 196,\n 201,\n 207,\n 206,\n 210,\n 240,\n 228,\n 187,\n 207,\n 202,\n 193,\n 200,\n 206,\n 205,\n 207,\n 214,\n 217,\n 200,\n 201,\n 205,\n 209,\n 185,\n 202,\n 182,\n 206,\n 209,\n 199,\n 198,\n 183,\n 179,\n 215,\n 210,\n 221,\n 272,\n 445,\n 976,\n 1557,\n 2161,\n 2823,\n 2990,\n 2136,\n 1805,\n 1773,\n 1598,\n 1553,\n 1534,\n 1684,\n 1690,\n 1643,\n 1511,\n 1438,\n 1343,\n 1346,\n 1279,\n 1232,\n 1377,\n 1383,\n 1357,\n 
1243,\n 1174,\n 1148,\n 1209,\n 1217,\n 1063,\n 1027,\n 905,\n 940,\n 844,\n 862,\n 775,\n 797,\n 881,\n 927,\n 1010,\n 1140,\n 1098,\n 1066,\n 988,\n 1149,\n 1110,\n 1089,\n 989,\n 915,\n 859,\n 771,\n 704,\n 686,\n 653,\n 730,\n 686,\n 631,\n 669,\n 750,\n 701,\n 777,\n 769,\n 877,\n 785,\n 726,\n 745,\n 711,\n 777,\n 1187,\n 552,\n 217,\n 202,\n 216,\n 226,\n 215,\n 192,\n 204,\n 214,\n 216,\n 188,\n 180,\n 179,\n 220,\n 175,\n 196,\n 212,\n 191,\n 194,\n 184,\n 223,\n 213,\n 206,\n 186,\n 223,\n 201,\n 205,\n 197,\n 208,\n 225,\n 206,\n 199,\n 190,\n 192,\n 201,\n 197,\n 197,\n 216,\n 207,\n 180,\n 173,\n 170,\n 241,\n 219,\n 199,\n 203,\n 207,\n 230,\n 196,\n 207,\n 208,\n 371,\n 902,\n 1360,\n 2015,\n 2695,\n 3121,\n 2259,\n 1813,\n 1716,\n 1644,\n 1536,\n 1591,\n 1580,\n 1541,\n 1580,\n 1522,\n 1448,\n 1411,\n 1272,\n 1284,\n 1292,\n 1327,\n 1307,\n 1167,\n 1224,\n 1181,\n 1156,\n 1084,\n 1034,\n 975,\n 1006,\n 932,\n 837,\n 873,\n 812,\n 813,\n 915,\n 1070,\n 1190,\n 1288,\n 1379,\n 1572,\n 1596,\n 1675,\n 1718,\n 1714,\n 1738,\n 1798,\n 1725,\n 1569,\n 1340,\n 1094,\n 828,\n 767,\n 715,\n 762,\n 756,\n 771,\n 788,\n 796,\n 915,\n 909,\n 790,\n 805,\n 789,\n 765,\n 856,\n 1353,\n 563,\n 220,\n 206,\n 212,\n 198,\n 206,\n 202,\n 199,\n 209,\n 194,\n 183,\n 217,\n 194,\n 192,\n 212,\n 202,\n 202,\n 194,\n 199,\n 198,\n 199,\n 210,\n 178,\n 211,\n 240,\n 194,\n 185,\n 171,\n 177,\n 205,\n 184,\n 210,\n 234,\n 219,\n 193,\n 188,\n 197,\n 192,\n 180,\n 182,\n 197,\n 199,\n 192,\n 210,\n 185,\n 211,\n 238,\n 191,\n 234,\n 229,\n 217,\n 257,\n 456,\n 900,\n 1328,\n 2141,\n 2851,\n 3042,\n 2305,\n 1892,\n 1755,\n 1693,\n 1667,\n 1542,\n 1498,\n 1554,\n 1467,\n 1468,\n 1417,\n 1421,\n 1393,\n 1345,\n 1352,\n 1268,\n 1152,\n 1112,\n 1109,\n 1128,\n 1077,\n 1127,\n 1065,\n 1029,\n 997,\n 943,\n 924,\n 888,\n 956,\n 987,\n 1175,\n 1552,\n 1822,\n 2210,\n 2686,\n 3241,\n 4020,\n 5006,\n 4089,\n 4154,\n 4388,\n 3738,\n 3639,\n 3167,\n 2395,\n 1766,\n 1051,\n 791,\n 
835,\n 884,\n 852,\n 961,\n 964,\n 962,\n 939,\n 831,\n 787,\n 821,\n 804,\n 1237,\n 1215,\n 297,\n 208,\n 201,\n 222,\n 190,\n 193,\n 192,\n 214,\n 206,\n 201,\n 202,\n 192,\n 179,\n 198,\n 206,\n 171,\n 193,\n 191,\n 191,\n 201,\n 188,\n 194,\n 229,\n 209,\n 218,\n 216,\n 214,\n 213,\n 202,\n 209,\n 209,\n 212,\n 214,\n 173,\n 197,\n 196,\n 193,\n 200,\n 188,\n 188,\n 198,\n 214,\n 219,\n 188,\n 225,\n 189,\n 209,\n 188,\n 194,\n 227,\n 206,\n 228,\n 250,\n 397,\n 787,\n 775,\n 1456,\n 2405,\n 3045,\n 2940,\n 2044,\n 1857,\n 1778,\n 1673,\n 1546,\n 1518,\n 1507,\n 1447,\n 1420,\n 1432,\n 1346,\n 1334,\n 1300,\n 1334,\n 1204,\n 1189,\n 1130,\n 1211,\n 1101,\n 1100,\n 1054,\n 1092,\n 986,\n 1092,\n 1008,\n 964,\n 992,\n 1042,\n 1384,\n 1575,\n 2284,\n 2962,\n 3281,\n 3613,\n 3829,\n 3770,\n 3632,\n 3563,\n 3792,\n 3517,\n 3264,\n 3003,\n 2476,\n 2044,\n 1612,\n 1115,\n 899,\n 938,\n 937,\n 1038,\n 1047,\n 1003,\n 914,\n 958,\n 872,\n 890,\n 878,\n 1243,\n 1587,\n 418,\n 231,\n 186,\n 200,\n 203,\n 180,\n 198,\n 222,\n 195,\n 172,\n 207,\n 168,\n 200,\n 199,\n 216,\n 207,\n 203,\n 199,\n 186,\n 213,\n 193,\n 196,\n 185,\n 213,\n 205,\n 173,\n 187,\n 213,\n 182,\n 214,\n 217,\n 173,\n 213,\n 182,\n 218,\n 198,\n 202,\n 180,\n 222,\n 213,\n 185,\n 190,\n 206,\n 188,\n 176,\n 207,\n 186,\n 212,\n 197,\n 191,\n 175,\n 196,\n 212,\n 366,\n 707,\n 667,\n 932,\n 1648,\n 2652,\n 3276,\n 2724,\n 2207,\n 1841,\n 1832,\n 1709,\n 1519,\n 1617,\n 1513,\n 1383,\n 1328,\n 1327,\n 1410,\n 1341,\n 1318,\n 1243,\n 1196,\n 1201,\n 1132,\n 1148,\n 1119,\n 1256,\n 1246,\n 1163,\n 1107,\n 1104,\n 1069,\n 1208,\n 1486,\n 1815,\n 2172,\n 2891,\n 2590,\n 2819,\n 2800,\n 2649,\n 2457,\n 2437,\n 2463,\n 2459,\n 2354,\n 2169,\n 1984,\n 1763,\n 1698,\n 1445,\n 971,\n 928,\n 1001,\n 1022,\n 1031,\n 1081,\n 1080,\n 952,\n 886,\n 942,\n 912,\n 904,\n 1624,\n 994,\n 224,\n 235,\n 199,\n 189,\n 214,\n 182,\n 190,\n 187,\n 204,\n 188,\n 190,\n 189,\n 222,\n 188,\n 200,\n 187,\n 196,\n 199,\n 188,\n 
184,\n 194,\n 200,\n 233,\n 206,\n 192,\n 194,\n 149,\n 191,\n 205,\n 194,\n 199,\n 222,\n 203,\n 202,\n 216,\n 195,\n 202,\n 184,\n 199,\n 179,\n 175,\n 201,\n 207,\n 191,\n 202,\n 213,\n 200,\n 212,\n 197,\n 193,\n 228,\n 219,\n 194,\n 220,\n 363,\n 657,\n 528,\n 560,\n 967,\n 1833,\n 2539,\n 3030,\n 2943,\n 2283,\n 2016,\n 1791,\n 1780,\n 1608,\n 1535,\n 1460,\n 1433,\n 1450,\n 1449,\n 1450,\n 1360,\n 1240,\n 1181,\n 1176,\n 1219,\n 1263,\n 1345,\n 1247,\n 1241,\n 1221,\n 1221,\n 1115,\n 1263,\n 1479,\n 1801,\n 2138,\n 2447,\n 2555,\n 2496,\n 2547,\n 2435,\n 2414,\n 2344,\n 2248,\n 2321,\n 2243,\n 2105,\n 1956,\n 1853,\n 1682,\n 1479,\n 1127,\n 1029,\n 1025,\n 1084,\n 1204,\n 1222,\n 1254,\n 1183,\n 1048,\n 995,\n 990,\n 897,\n 1204,\n 1628,\n 430,\n 211,\n 198,\n 180,\n 192,\n 195,\n 187,\n 211,\n 212,\n 191,\n 197,\n 241,\n 174,\n 214,\n 200,\n 211,\n 176,\n 188,\n 188,\n 191,\n 217,\n 196,\n 205,\n 205,\n 242,\n 193,\n 182,\n 191,\n 202,\n 210,\n 200,\n 210,\n 228,\n 200,\n 197,\n 178,\n 189,\n 216,\n 237,\n 220,\n 228,\n 195,\n 208,\n 184,\n 191,\n 201,\n 219,\n 177,\n 183,\n 195,\n 196,\n 236,\n 226,\n 190,\n 272,\n 643,\n 591,\n 480,\n 516,\n 790,\n 1299,\n 2271,\n 3041,\n 3266,\n 2544,\n 2130,\n 1948,\n 1888,\n 1689,\n 1603,\n 1546,\n 1507,\n 1498,\n 1359,\n 1354,\n 1330,\n 1370,\n 1309,\n 1287,\n 1375,\n 1152,\n 1167,\n 1174,\n 1161,\n 1159,\n 1165,\n 1560,\n 1793,\n 2037,\n 2253,\n 2477,\n 2497,\n 2644,\n 2425,\n 2537,\n 2415,\n 2448,\n 2386,\n 2337,\n 2156,\n 2201,\n 1948,\n 1905,\n 1794,\n 1372,\n 1152,\n 1105,\n 1104,\n 1084,\n 1196,\n 1132,\n 1276,\n 1210,\n 1106,\n 1097,\n 1085,\n 960,\n 1377,\n 1275,\n 309,\n 213,\n 193,\n 180,\n 195,\n 189,\n 205,\n 202,\n 220,\n 208,\n 183,\n 224,\n 184,\n 174,\n 172,\n 207,\n 191,\n 188,\n 218,\n 181,\n 235,\n 219,\n 234,\n 213,\n 184,\n 214,\n 175,\n 217,\n 189,\n 196,\n 195,\n 186,\n 202,\n 203,\n 171,\n 205,\n 206,\n 187,\n 200,\n 220,\n 233,\n 224,\n 201,\n 209,\n 179,\n 212,\n 216,\n 218,\n 194,\n 234,\n 
196,\n 202,\n 207,\n 167,\n 196,\n 274,\n 660,\n 553,\n 462,\n 459,\n 474,\n 680,\n 1203,\n 2011,\n 2777,\n 3340,\n 2940,\n 2318,\n 2102,\n 1844,\n 1819,\n 1725,\n 1716,\n 1673,\n 1614,\n 1352,\n 1322,\n 1503,\n 1297,\n 1217,\n 1188,\n 1228,\n 1168,\n 1224,\n 1155,\n 1265,\n 1662,\n 2012,\n 2080,\n 2270,\n 2446,\n 2595,\n 2481,\n 2482,\n 2347,\n 2406,\n 2534,\n 2430,\n 2407,\n 2428,\n 2493,\n 2326,\n 2197,\n 2107,\n 1886,\n 1503,\n 1372,\n 1263,\n 1313,\n 1267,\n 1280,\n 1293,\n 1285,\n 1359,\n 1292,\n 1213,\n 1155,\n 1235,\n 1756,\n 530,\n 230,\n 206,\n 197,\n 193,\n 190,\n 169,\n 192,\n 189,\n 203,\n 207,\n 189,\n 190,\n 209,\n 195,\n 197,\n 218,\n 175,\n 196,\n 194,\n 193,\n 189,\n 204,\n 216,\n 186,\n 193,\n 202,\n 177,\n 211,\n 204,\n 203,\n 213,\n 179,\n 211,\n 196,\n 199,\n 186,\n 196,\n 210,\n 204,\n 198,\n 208,\n 203,\n 214,\n 204,\n 192,\n 200,\n 190,\n 197,\n 213,\n 206,\n 195,\n 195,\n 220,\n 181,\n 332,\n 701,\n 600,\n 519,\n 430,\n 425,\n 483,\n 641,\n 911,\n 1299,\n 1736,\n 2693,\n 3328,\n 2734,\n 2424,\n 2260,\n 2208,\n 2096,\n 1930,\n 1675,\n 1455,\n 1499,\n 1441,\n 1260,\n 1155,\n 1114,\n 1138,\n 1088,\n 1222,\n 1455,\n 1657,\n 1804,\n 2027,\n 2278,\n 2518,\n 2417,\n 2686,\n 2503,\n 2480,\n 2635,\n 2836,\n 2893,\n 3063,\n 3024,\n 2821,\n 2519,\n 2483,\n 2414,\n 2199,\n 1871,\n 1605,\n 1550,\n 1489,\n 1542,\n 1357,\n 1325,\n 1355,\n 1371,\n 1320,\n 1350,\n 1370,\n 1101,\n 1823,\n 940,\n 222,\n 236,\n 193,\n 204,\n 222,\n 182,\n 196,\n 191,\n 184,\n 180,\n 226,\n 156,\n 188,\n 218,\n 207,\n 216,\n 201,\n 198,\n 209,\n 206,\n 219,\n 216,\n 200,\n 212,\n 189,\n 197,\n 177,\n 208,\n 202,\n 192,\n 194,\n 189,\n 196,\n 222,\n 184,\n 174,\n 186,\n 190,\n 195,\n 197,\n 206,\n 231,\n 200,\n 199,\n 208,\n 223,\n 216,\n 219,\n 186,\n 209,\n 188,\n 181,\n 210,\n 212,\n 216,\n 237,\n 552,\n 673,\n 529,\n 422,\n 447,\n 476,\n 382,\n 431,\n 541,\n 637,\n 865,\n 1320,\n 2038,\n 2816,\n 3366,\n 3043,\n 2866,\n 2539,\n 2310,\n 1849,\n 1677,\n 1735,\n 1685,\n 1468,\n 
1318,\n 1125,\n 1319,\n 1382,\n 1656,\n 1892,\n 1940,\n 2208,\n 2351,\n 2389,\n 2694,\n 2901,\n 3152,\n 3170,\n 3308,\n 3030,\n 2692,\n 2351,\n 1967,\n 1786,\n 1616,\n 1419,\n 1309,\n 1083,\n 674,\n 553,\n 782,\n 1164,\n 1290,\n 1320,\n 1631,\n 1806,\n 1715,\n 1919,\n 1944,\n 1356,\n 1193,\n 1583,\n 1190,\n 259,\n 197,\n 180,\n 203,\n 186,\n 200,\n 189,\n 204,\n 212,\n 187,\n 199,\n 217,\n 190,\n 181,\n 187,\n 189,\n 208,\n 218,\n 208,\n 205,\n 222,\n 205,\n 204,\n 219,\n 185,\n 213,\n 220,\n 230,\n 198,\n 184,\n 182,\n 221,\n 202,\n 195,\n 207,\n 183,\n 204,\n 191,\n 225,\n 187,\n 225,\n 169,\n 189,\n 192,\n 172,\n 194,\n 215,\n 218,\n 199,\n 214,\n 184,\n 198,\n 206,\n 205,\n 197,\n 224,\n 213,\n 406,\n 689,\n 617,\n 508,\n 445,\n 404,\n 443,\n 444,\n 462,\n 409,\n 390,\n 432,\n 585,\n 823,\n 1072,\n 1399,\n 1774,\n 2138,\n 2152,\n 2045,\n 2278,\n 2313,\n 2217,\n 2283,\n 1591,\n 1638,\n 1598,\n 1829,\n 1982,\n 2097,\n 2102,\n 2427,\n 2363,\n 2488,\n 2433,\n 2293,\n 2105,\n 2070,\n 1911,\n 1852,\n 1699,\n 1636,\n 1506,\n 1412,\n 1346,\n 1242,\n 951,\n 695,\n 378,\n 259,\n 299,\n 376,\n 548,\n 871,\n 1219,\n 1234,\n 1432,\n 699,\n 1257,\n 1774,\n 1259,\n 1656,\n 524,\n 213,\n 215,\n 190,\n 222,\n 204,\n 192,\n 185,\n 175,\n 185,\n 196,\n 211,\n 195,\n 184,\n 172,\n 182,\n 213,\n 188,\n 204,\n 189,\n 217,\n 170,\n 219,\n 222,\n 202,\n 178,\n 218,\n 195,\n 208,\n 225,\n 213,\n 209,\n 181,\n 186,\n 186,\n 175,\n 188,\n 212,\n 190,\n 224,\n 219,\n 206,\n 196,\n 212,\n 181,\n 241,\n 204,\n 210,\n 200,\n 217,\n 213,\n 206,\n 185,\n 223,\n 197,\n 225,\n 189,\n 229,\n 462,\n 837,\n 666,\n 436,\n 429,\n 539,\n 968,\n 1087,\n 301,\n 195,\n 231,\n 230,\n 283,\n 312,\n 349,\n 397,\n 431,\n 430,\n 456,\n 479,\n 492,\n 573,\n 495,\n 456,\n 619,\n 794,\n 939,\n 1112,\n 1295,\n 1296,\n 1435,\n 1689,\n 1745,\n 1848,\n 1753,\n 1723,\n 1829,\n 1811,\n 1727,\n 1625,\n 1591,\n 1569,\n 1433,\n 1340,\n 1155,\n 1027,\n 702,\n 310,\n 217,\n 209,\n 220,\n 234,\n 272,\n 321,\n 430,\n 382,\n 
259,\n 360,\n 1612,\n 1557,\n 1652,\n 573,\n 213,\n 221,\n 182,\n 201,\n 208,\n 216,\n 193,\n 219,\n 193,\n 182,\n 226,\n 192,\n 179,\n 214,\n 202,\n 193,\n 214,\n 205,\n 201,\n 178,\n 220,\n 182,\n 210,\n 206,\n 213,\n 216,\n 210,\n 183,\n 218,\n 192,\n 190,\n 203,\n 177,\n 163,\n 210,\n 202,\n 193,\n 207,\n 195,\n 197,\n 198,\n 181,\n 230,\n 203,\n 183,\n 212,\n 183,\n 220,\n 204,\n 163,\n 207,\n 180,\n 177,\n 207,\n 204,\n 180,\n 201,\n 222,\n 300,\n 440,\n 532,\n 461,\n 422,\n 900,\n 848,\n 303,\n 213,\n 210,\n 205,\n 213,\n 194,\n 211,\n 216,\n 225,\n 223,\n 206,\n 194,\n 222,\n 270,\n 285,\n 446,\n 683,\n 802,\n 929,\n 1049,\n 1194,\n 1216,\n 1339,\n 1532,\n 1634,\n 1810,\n 1801,\n 1769,\n 1849,\n 1792,\n 1729,\n 1654,\n 1609,\n 1553,\n 1371,\n 1354,\n 1238,\n 1062,\n 703,\n 350,\n 201,\n 213,\n 190,\n 196,\n 211,\n 213,\n 203,\n 226,\n 209,\n 248,\n 595,\n 1719,\n 1644,\n 527,\n 225,\n 189,\n 218,\n 199,\n 187,\n 181,\n 187,\n 200,\n 202,\n 187,\n 157,\n 181,\n 199,\n 201,\n 223,\n 195,\n 171,\n 201,\n 200,\n 213,\n 193,\n 209,\n 182,\n 202,\n 193,\n 190,\n 206,\n 192,\n 185,\n 198,\n 210,\n 198,\n 197,\n 201,\n 190,\n 191,\n 195,\n 169,\n 203,\n 182,\n 208,\n 201,\n 210,\n 217,\n 182,\n 187,\n 191,\n 211,\n 201,\n 208,\n 205,\n 220,\n 189,\n 220,\n 183,\n 190,\n 194,\n 198,\n 237,\n 197,\n 212,\n 227,\n 281,\n 819,\n 1081,\n 387,\n 210,\n 202,\n 189,\n 229,\n 191,\n 172,\n 213,\n 205,\n 198,\n 220,\n 188,\n 226,\n 332,\n 591,\n 722,\n 888,\n 909,\n 1115,\n 1149,\n 1241,\n 1355,\n 1485,\n 1636,\n 1714,\n 1766,\n 1797,\n 1751,\n 1741,\n 1696,\n 1737,\n 1616,\n 1543,\n 1525,\n 1371,\n 1332,\n 1158,\n 976,\n 525,\n 232,\n 197,\n 201,\n 216,\n 184,\n 199,\n 205,\n 222,\n 192,\n 192,\n 227,\n 439,\n 1524,\n 672,\n 222,\n 207,\n 196,\n 195,\n 197,\n 192,\n 199,\n 165,\n 212,\n 174,\n 234,\n 216,\n 212,\n 197,\n 188,\n 199,\n 188,\n 226,\n 219,\n 219,\n 197,\n 173,\n 197,\n 181,\n 207,\n 192,\n 196,\n 208,\n 186,\n 203,\n 212,\n 181,\n 212,\n 189,\n 193,\n 187,\n 
212,\n 212,\n 195,\n 200,\n 204,\n 190,\n 220,\n 189,\n 212,\n 214,\n 198,\n 176,\n 185,\n 193,\n 214,\n 205,\n 212,\n 185,\n 217,\n 219,\n 175,\n 187,\n 229,\n 218,\n 193,\n 215,\n 216,\n 199,\n 287,\n 793,\n 788,\n 301,\n 205,\n 178,\n 194,\n 183,\n 184,\n 212,\n 180,\n 211,\n 210,\n 218,\n 383,\n 620,\n 770,\n 899,\n 1027,\n 1157,\n 1250,\n 1272,\n 1359,\n 1442,\n 1608,\n 1657,\n 1768,\n 1805,\n 1814,\n 1782,\n 1777,\n 1797,\n 1628,\n 1615,\n 1526,\n 1520,\n 1431,\n 1246,\n 1138,\n 938,\n 556,\n 223,\n 200,\n 202,\n 211,\n 183,\n 177,\n 199,\n 204,\n 198,\n 196,\n 184,\n 256,\n 324,\n 247,\n 203,\n 188,\n 196,\n 167,\n 192,\n 176,\n 199,\n 197,\n 189,\n 203,\n 179,\n 178,\n 195,\n 194,\n 216,\n 216,\n 207,\n 197,\n 177,\n 217,\n 221,\n 201,\n 193,\n 186,\n 208,\n 187,\n 212,\n 225,\n 177,\n 190,\n 168,\n 189,\n 206,\n 202,\n 216,\n 230,\n 215,\n 192,\n 194,\n 221,\n 213,\n 181,\n 215,\n 199,\n 227,\n 212,\n 197,\n 200,\n 196,\n 197,\n 212,\n 202,\n 184,\n 193,\n 200,\n 209,\n 196,\n 221,\n 200,\n 225,\n 168,\n 230,\n 207,\n 211,\n 195,\n 379,\n 809,\n 459,\n 223,\n 226,\n 219,\n 188,\n 202,\n 171,\n 188,\n 228,\n 245,\n 453,\n 708,\n 846,\n 936,\n 1085,\n 1167,\n 1280,\n 1295,\n 1616,\n 1564,\n 1515,\n 1820,\n 1738,\n 1810,\n 1827,\n 1798,\n 1756,\n 1802,\n 1725,\n 1660,\n 1614,\n 1519,\n 1516,\n 1347,\n 1325,\n 1070,\n 831,\n 471,\n 209,\n 206,\n 199,\n 208,\n 217,\n 215,\n 200,\n 220,\n 182,\n 174,\n 195,\n 195,\n 209,\n 194,\n 198,\n 181,\n 189,\n 183,\n 198,\n 199,\n 182,\n 185,\n 192,\n 185,\n 186,\n 204,\n 181,\n 188,\n 202,\n 210,\n 187,\n 200,\n 212,\n 196,\n 193,\n 193,\n 208,\n 189,\n 184,\n 212,\n 210,\n 191,\n 209,\n 190,\n 204,\n 208,\n 222,\n 202,\n 190,\n 211,\n 227,\n 187,\n 213,\n 209,\n 207,\n 216,\n 193,\n 197,\n 201,\n 200,\n 196,\n 203,\n 204,\n 189,\n 195,\n 210,\n 194,\n 182,\n 217,\n 190,\n 178,\n 210,\n 207,\n 195,\n 218,\n 208,\n 222,\n 205,\n 194,\n 207,\n 379,\n 715,\n 395,\n 231,\n 207,\n 205,\n 190,\n 192,\n 181,\n 242,\n 431,\n 
709,\n 916,\n 1022,\n 1070,\n 1185,\n 1264,\n 1364,\n 1451,\n 1466,\n 1419,\n 1539,\n 1651,\n 1766,\n 1856,\n 1844,\n 1816,\n 1905,\n 1839,\n 1707,\n 1711,\n 1679,\n 1636,\n 1452,\n 1372,\n 1274,\n 1097,\n 867,\n 520,\n 252,\n 198,\n 222,\n 197,\n 185,\n 214,\n 209,\n 206,\n 208,\n 184,\n 236,\n 196,\n 211,\n 202,\n 182,\n 198,\n 209,\n 203,\n 218,\n 221,\n 206,\n 201,\n 187,\n 220,\n 202,\n 195,\n 205,\n 217,\n 199,\n 186,\n 219,\n 202,\n 167,\n 186,\n 190,\n 196,\n 211,\n 203,\n 185,\n 188,\n 207,\n 207,\n 224,\n 213,\n 191,\n 222,\n 208,\n 174,\n 231,\n 202,\n 214,\n 210,\n 201,\n 197,\n 190,\n 191,\n 182,\n 197,\n 198,\n 224,\n 203,\n 197,\n 187,\n 201,\n 203,\n 198,\n 189,\n 194,\n 193,\n 194,\n 214,\n 188,\n 210,\n 198,\n 195,\n 177,\n 180,\n 180,\n 209,\n 196,\n 259,\n 338,\n 578,\n 432,\n 246,\n 203,\n 214,\n 196,\n 189,\n 374,\n 747,\n 898,\n 1077,\n 1259,\n 1263,\n 1318,\n 1406,\n 1438,\n 1461,\n 1511,\n 1575,\n 1666,\n 1681,\n 1787,\n 1850,\n 1751,\n 1794,\n 1789,\n 1834,\n 1719,\n 1656,\n 1721,\n 1600,\n 1488,\n 1380,\n 1325,\n 1060,\n 855,\n 537,\n 251,\n 200,\n 200,\n 197,\n 180,\n 209,\n 183,\n 226,\n 175,\n 205,\n 190,\n 197,\n 197,\n 194,\n 190,\n 208,\n 177,\n 208,\n 183,\n 186,\n 206,\n 216,\n 210,\n 195,\n 197,\n 214,\n 223,\n 186,\n 231,\n 200,\n 220,\n 210,\n ),\n dtype=np.float32,\n).reshape(175, 119)\n\n\n# main ########################################################################\n\nif __name__ == \"__main__\":\n import sys\n import traceback\n\n if \"gl\" in sys.argv or \"opengl\" in sys.argv:\n backend = \"opengl\"\n elif \"osmesa\" in sys.argv or \"mesa\" in sys.argv:\n backend = \"osmesa\"\n else:\n backend = \"mpl\"\n logger.info(\"BACKEND: %s\", backend)\n\n app = qt.QApplication([])\n\n # Exception handler\n def handler(type_, value, trace):\n logger.error(\"%s %s %s\" % (type_, value, \"\".join(traceback.format_tb(trace))))\n\n sys.excepthook = handler\n window = TestWindow(parent=None, backend=backend)\n\n 
sys.exit(app.exec_())\n"
] |
[
[
"numpy.random.random",
"numpy.sqrt",
"numpy.arange",
"numpy.random.random_sample",
"numpy.dtype",
"numpy.ones",
"numpy.sin",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
TJUMMG/SiamDMU
|
[
"728ba1333b8d600a8b238a7d29901a01b653c33d"
] |
[
"pysot/models/neck/neck.py"
] |
[
"# Copyright (c) SenseTime. All Rights Reserved.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nimport torch.nn as nn\n\n\nclass AdjustLayer(nn.Module):\n def __init__(self, in_channels, out_channels, center_size=7):\n super(AdjustLayer, self).__init__()\n self.downsample = nn.Sequential(\n nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=False),\n nn.BatchNorm2d(out_channels),\n )\n self.center_size = center_size\n\n def forward(self, x):\n x = self.downsample(x)\n if x.size(3) < 20:\n l = (x.size(3) - self.center_size) // 2\n r = l + self.center_size\n x = x[:, :, l:r, l:r]\n return x\n\n\nclass AdjustAllLayer(nn.Module):\n def __init__(self, in_channels, out_channels, center_size=7):\n super(AdjustAllLayer, self).__init__()\n self.num = len(out_channels)\n if self.num == 1:\n self.downsample = AdjustLayer(in_channels[0],\n out_channels[0],\n center_size)\n else:\n for i in range(self.num): # from 0\n self.add_module('downsample'+str(i+2),\n AdjustLayer(in_channels[i],\n out_channels[i],\n center_size))\n\n def forward(self, features):\n if self.num == 1:\n return self.downsample(features)\n else:\n out = []\n for i in range(self.num):\n adj_layer = getattr(self, 'downsample'+str(i+2))\n out.append(adj_layer(features[i]))\n return out\n"
] |
[
[
"torch.nn.Conv2d",
"torch.nn.BatchNorm2d"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wxwxzhang/tensorflow
|
[
"fd2d5a3739590590b6bec4c856421ed6eb73a905"
] |
[
"tensorflow/python/data/experimental/ops/distribute.py"
] |
[
"# Copyright 2019 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Distribution Strategy-related dataset transformations.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.python.compat import compat\nfrom tensorflow.python.data.ops import dataset_ops\nfrom tensorflow.python.data.util import nest\nfrom tensorflow.python.data.util import structure\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops\n\n\nclass _AutoShardDataset(dataset_ops.UnaryDataset):\n \"\"\"A `Dataset` that shards the `Dataset` automatically.\n\n This dataset takes in an existing dataset and tries to automatically figure\n out how to shard the dataset in a multi-worker scenario. 
Currently, it uses\n Grappler to walk up the dataset graph until it finds a reader dataset (e.g.\n CSVDataset, TFRecordDataset), then inserts a ShardDataset op before that node\n so that each worker only sees some files.\n\n Args:\n num_workers: Total number of workers to shard this dataset across.\n index: The current worker index (out of the total number of workers) this\n dataset is for.\n\n Raises:\n NotFoundError: If we cannot find a suitable reader dataset to begin\n automatically sharding the dataset.\n \"\"\"\n\n def __init__(self, input_dataset, num_workers, index):\n self._input_dataset = input_dataset\n\n self._element_spec = input_dataset.element_spec\n if compat.forward_compatible(2019, 8, 3):\n variant_tensor = ged_ops.auto_shard_dataset(\n self._input_dataset._variant_tensor, # pylint: disable=protected-access\n num_workers=num_workers,\n index=index,\n **self._flat_structure)\n else:\n variant_tensor = ged_ops.experimental_auto_shard_dataset(\n self._input_dataset._variant_tensor, # pylint: disable=protected-access\n num_workers=num_workers,\n index=index,\n **self._flat_structure)\n super(_AutoShardDataset, self).__init__(input_dataset, variant_tensor)\n\n @property\n def element_spec(self):\n return self._element_spec\n\n\ndef _AutoShardDatasetV1(input_dataset, num_workers, index): # pylint: disable=invalid-name\n return dataset_ops.DatasetV1Adapter(\n _AutoShardDataset(input_dataset, num_workers, index))\n\n\nclass _RebatchDataset(dataset_ops.UnaryDataset):\n \"\"\"A `Dataset` that divides the batch size by `num_workers`.\n\n For each batch in the input dataset, the resulting dataset will produce\n `num_replicas` minibatches whose sizes add up to the original batch size.\n \"\"\"\n\n def __init__(self, input_dataset, num_workers, use_fallback=True):\n self._input_dataset = input_dataset\n\n def recalculate_output_shapes(output_shapes):\n \"\"\"Recalculates the output_shapes after dividing it by num_workers.\"\"\"\n if len(output_shapes) < 1:\n 
raise ValueError(\n \"Input shape should have at least one dimension. \"\n \"Perhaps your input dataset is not batched?\")\n output_dims = [d.value for d in output_shapes.dims]\n\n if output_dims[0] is not None and output_dims[0] % num_workers == 0:\n output_dims[0] = output_dims[0] // num_workers\n else:\n # Set the batch dimension to unknown. If the global batch size does not\n # divide num_workers evenly, the minibatches may have different sizes.\n output_dims[0] = None\n return tensor_shape.TensorShape(output_dims)\n\n input_types = dataset_ops.get_legacy_output_types(self._input_dataset)\n input_shapes = dataset_ops.get_legacy_output_shapes(self._input_dataset)\n input_classes = dataset_ops.get_legacy_output_classes(self._input_dataset)\n output_shapes = nest.map_structure(recalculate_output_shapes, input_shapes)\n\n self._element_spec = structure.convert_legacy_structure(\n input_types, output_shapes, input_classes)\n if compat.forward_compatible(2019, 8, 13) or not use_fallback:\n variant_tensor = ged_ops.rebatch_dataset(\n self._input_dataset._variant_tensor, # pylint: disable=protected-access\n num_workers=num_workers,\n use_fallback=use_fallback,\n **self._flat_structure)\n elif compat.forward_compatible(2019, 8, 3):\n variant_tensor = ged_ops.rebatch_dataset(\n self._input_dataset._variant_tensor, # pylint: disable=protected-access\n num_workers=num_workers,\n **self._flat_structure)\n else:\n variant_tensor = ged_ops.experimental_rebatch_dataset(\n self._input_dataset._variant_tensor, # pylint: disable=protected-access\n num_workers=num_workers,\n **self._flat_structure)\n super(_RebatchDataset, self).__init__(input_dataset, variant_tensor)\n\n @property\n def element_spec(self):\n return self._element_spec\n\n\nclass _RemoteDataset(dataset_ops.DatasetSource):\n \"\"\"Creates a dataset on a given `device` given a graph def.\"\"\"\n\n def __init__(self, graph_def, device, element_spec):\n self._elem_spec = element_spec\n with ops.device(device):\n 
variant_tensor = ged_ops.dataset_from_graph(graph_def)\n super(_RemoteDataset, self).__init__(variant_tensor)\n\n @property\n def element_spec(self):\n return self._elem_spec\n\n\ndef replicate(dataset, devices):\n \"\"\"A transformation that replicates `dataset` onto a list of devices.\n\n Args:\n dataset: A `tf.data.Dataset` object.\n devices: A list of devices to replicate the dataset on.\n\n Returns:\n A dictionary mapping device name to a dataset on that device.\n \"\"\"\n if not isinstance(dataset, dataset_ops.DatasetV2):\n raise TypeError(\"`dataset` must be a `tf.data.Dataset` object.\")\n\n graph_def = dataset._as_serialized_graph() # pylint: disable=protected-access\n datasets = {}\n for device in devices:\n ds = _RemoteDataset(graph_def, device, dataset.element_spec)\n datasets[device] = ds\n return datasets\n\n\n_AutoShardDatasetV1.__doc__ = _AutoShardDataset.__doc__\n"
] |
[
[
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.gen_experimental_dataset_ops.dataset_from_graph",
"tensorflow.python.compat.compat.forward_compatible",
"tensorflow.python.ops.gen_experimental_dataset_ops.rebatch_dataset",
"tensorflow.python.data.ops.dataset_ops.get_legacy_output_classes",
"tensorflow.python.ops.gen_experimental_dataset_ops.auto_shard_dataset",
"tensorflow.python.data.util.nest.map_structure",
"tensorflow.python.data.util.structure.convert_legacy_structure",
"tensorflow.python.framework.ops.device",
"tensorflow.python.data.ops.dataset_ops.get_legacy_output_shapes",
"tensorflow.python.data.ops.dataset_ops.get_legacy_output_types",
"tensorflow.python.ops.gen_experimental_dataset_ops.experimental_rebatch_dataset",
"tensorflow.python.ops.gen_experimental_dataset_ops.experimental_auto_shard_dataset"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"2.7",
"2.6",
"2.4",
"2.3",
"2.9",
"2.5",
"2.2",
"2.10"
]
}
] |
chaitanyaKaushal/TOPSIS-Chaitanya-101803269
|
[
"7b67d603806867114c3cc7787ad3e786bb1d4b7b"
] |
[
"TOPSIS_Chaitanya_Kaushal/topsis.py"
] |
[
"if __name__ == \"__main__\":\n import os\n import numpy as np\n import pandas as pd\n import sys\n import copy\n\n if len(sys.argv) != 5:\n raise Exception('Number of params incorrect')\n\n dataFile = sys.argv[1]\n weights = sys.argv[2]\n impacts = sys.argv[3]\n resultFile = sys.argv[4]\n\n #note : len(weights) = number of commas + 1\n try:\n weights = np.array([x.strip() for x in weights.split(',')],dtype = float)\n impacts = np.array([x.strip() for x in impacts.split(',')],dtype = str)\n except:\n raise Exception('Invalid data entries for wieghts/impacts')\n for element in impacts:\n if element != '+' and element != '-':\n raise Exception('Incorrect Impact')\n\n if os.path.exists(dataFile) == False:\n raise Exception('File not Found')\n\n df = pd.read_csv(dataFile)\n if len(df.columns) < 3:\n raise Exception('Number of columns incorrect')\n\n # corr = np.array(df['Corr'])\n # rseq = np.array(df['Rseq'])\n # rmse = np.array(df['RMSE'])\n # accuracy = np.array(df['Accuracy'])\n columns_np = []\n\n #handling non-numeric values\n try:\n for column in df.columns[1:]:\n col = np.array(df[column],dtype = float)\n columns_np.append(col)\n except:\n raise Exception('Entries were not numeric values')\n\n columns_np = np.array(columns_np)\n lenCheck = len(columns_np[0])\n for col in columns_np :\n if lenCheck != len(col):\n raise Exception('Incorrect length Match')\n\n if (len(impacts) != len(df.columns) - 1 ) or (len(weights) != len(df.columns) - 1):\n raise Exception('Incorrect Length Match')\n\n\n #After all exceptions are handled , we are good to go =>\n\n topsisScore = []\n ranking = [None]*(len(df[df.columns[0]]))\n\n denominator = []\n for col in columns_np:\n denominator.append(np.sum(col**2))\n\n # finding the weighted normalized values\n print(type(weights[0]))\n for i in range(len(columns_np)):\n columns_np[i] = columns_np[i] * (weights[i] / denominator[i])\n\n # finding ideal best and ideal worst\n ideal_best = []\n ideal_worst = []\n for i in 
range(len(columns_np)):\n if impacts[i] == '+':\n ideal_best.append(np.max(columns_np[i]))\n ideal_worst.append(np.min(columns_np[i]))\n else:\n ideal_best.append(np.min(columns_np[i]))\n ideal_worst.append(np.max(columns_np[i]))\n\n #finding euclidean distance between ideal best and ideal worst\n\n for i in range(len(df[df.columns[0]])): # for each criteria object mode\n sum_best = 0\n sum_worst = 0\n for j in range(len(columns_np)): # for columns 2nd till last\n sum_best += (columns_np[j][i] - ideal_best[j]) * (columns_np[j][i] - ideal_best[j])\n sum_worst += (columns_np[j][i] - ideal_worst[j]) * (columns_np[j][i] - ideal_worst[j])\n\n sum_best = (sum_best ** (0.5))\n sum_worst = (sum_worst ** (0.5))\n topsisScore.append(sum_worst / (sum_best + sum_worst) )\n\n pseudo_score = copy.deepcopy(topsisScore)\n\n rank = 1\n for count in range(len(pseudo_score)):\n idx = pseudo_score.index(max(pseudo_score))\n ranking[idx] = rank\n pseudo_score[idx] = -1\n rank = rank + 1\n\n df_new = copy.deepcopy(df)\n df_new['Topsis Score'] = topsisScore\n df_new['Rank'] = ranking\n df_new.to_csv(resultFile,index = False)\n\n\n\n\n\n\n"
] |
[
[
"pandas.read_csv",
"numpy.min",
"numpy.max",
"numpy.array",
"numpy.sum"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
wilhelmzh/Paddle
|
[
"e352467c1c3843cb0036604d81a8185c6d6cb337"
] |
[
"python/paddle/fluid/optimizer.py"
] |
[
"# Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import print_function\n\nimport numpy as np\nfrom collections import defaultdict\n\nfrom paddle.fluid.distribute_lookup_table import find_distributed_lookup_table\nfrom paddle.fluid.framework import Program, Variable, name_scope, default_main_program, default_startup_program\n\nfrom . import framework\nfrom . import layers\nfrom . import unique_name\nfrom .backward import append_backward, _some_in_set_, _append_grad_suffix_\nfrom .clip import append_gradient_clip_ops, error_clip_callback\nfrom .framework import program_guard\nfrom .initializer import Constant\nfrom .layer_helper import LayerHelper\nfrom .layers import ops\nfrom .regularizer import append_regularization_ops\nfrom .dygraph import base as imperative_base\nfrom .dygraph.learning_rate_scheduler import LearningRateDecay\nfrom paddle.fluid import core\nfrom paddle.fluid.layers import tensor\nfrom functools import reduce\nfrom .wrapped_decorator import signature_safe_contextmanager\n\n__all__ = [\n 'SGD', 'Momentum', 'Adagrad', 'Adam', 'Adamax', 'DecayedAdagrad', 'Ftrl',\n 'SGDOptimizer', 'MomentumOptimizer', 'AdagradOptimizer', 'AdamOptimizer',\n 'AdamaxOptimizer', 'DecayedAdagradOptimizer', 'RMSPropOptimizer',\n 'FtrlOptimizer', 'Adadelta', 'ModelAverage', 'LarsMomentum',\n 'LarsMomentumOptimizer', 'DGCMomentumOptimizer', 'LambOptimizer',\n 'ExponentialMovingAverage', 
'PipelineOptimizer', 'LookaheadOptimizer'\n]\n\n\nclass Optimizer(object):\n \"\"\"Optimizer Base class.\n\n Define the common interface of an optimizer.\n User should not use this class directly,\n but need to use one of it's implementation.\n \"\"\"\n\n @imperative_base.no_grad\n def __init__(self, learning_rate, regularization=None, name=None):\n if framework.in_dygraph_mode():\n if not isinstance(learning_rate, float) and \\\n not isinstance(learning_rate, LearningRateDecay):\n raise TypeError(\n \"learning rate should be float or LearningRateDecay, got %s here\"\n % type(learning_rate))\n if name is not None:\n self._name = unique_name.generate(name)\n else:\n self._name = unique_name.generate(self.__class__.__name__)\n else:\n if not isinstance(learning_rate, float) and \\\n not isinstance(learning_rate, framework.Variable):\n raise TypeError(\n \"learning rate should be float or Variable, got %s here\" %\n type(learning_rate))\n self._name = name\n\n self.regularization = regularization\n self._learning_rate = learning_rate\n # the learning rate type should be inferenced from loss\n self._dtype = None\n # each program should have a independent learning rate\n # program -> Variable(learning_rate)\n self._learning_rate_map = dict()\n if isinstance(self._learning_rate, framework.Variable):\n self._learning_rate_map[framework.default_main_program(\n )] = self._learning_rate\n # Dictionary of accumulators. Some optimizer subclasses need to\n # allocate and manage extra variables associated with the parameters\n # to train. These variables are called accumulators.\n # {accum_name : { paramter_name : accumulator_for_parameter, ...}, ...}\n self._accumulators = defaultdict(lambda: dict())\n self.helper = None\n self._opti_name_list = []\n\n def load(self, stat_dict):\n \"\"\"\n load optimizer with learning rate decay in dygraph mode\n :return: None\n\n Args:\n stat_dict: the dict load by load_persistable method\n\n Examples:\n\n .. 
code-block:: python\n\n from __future__ import print_function\n import numpy as np\n import paddle\n import paddle.fluid as fluid\n from paddle.fluid.optimizer import SGDOptimizer\n from paddle.fluid.dygraph.nn import FC\n from paddle.fluid.dygraph.base import to_variable\n\n class MLP(fluid.Layer):\n def __init__(self, name_scope):\n super(MLP, self).__init__(name_scope)\n\n self._fc1 = FC(self.full_name(), 10)\n self._fc2 = FC(self.full_name(), 10)\n\n def forward(self, inputs):\n y = self._fc1(inputs)\n y = self._fc2(y)\n return y\n\n with fluid.dygraph.guard():\n mlp = MLP('mlp')\n optimizer2 = SGDOptimizer(\n learning_rate=fluid.layers.natural_exp_decay(\n learning_rate=0.1,\n decay_steps=10000,\n decay_rate=0.5,\n staircase=True))\n\n train_reader = paddle.batch(\n paddle.dataset.mnist.train(), batch_size=128, drop_last=True)\n\n for batch_id, data in enumerate(train_reader()):\n dy_x_data = np.array(\n [x[0].reshape(1, 28, 28) for x in data]).astype('float32')\n\n y_data = np.array([x[1] for x in data]).astype('int64').reshape(\n 128, 1)\n\n img = to_variable(dy_x_data)\n label = to_variable(y_data)\n label._stop_gradient = True\n cost = mlp(img)\n avg_loss = fluid.layers.reduce_mean(cost)\n avg_loss.backward()\n optimizer.minimize(avg_loss)\n mlp.clear_gradients()\n fluid.dygraph.save_persistables(\n mlp.state_dict(), [optimizer, optimizer2], \"save_dir_2\")\n if batch_id == 2:\n break\n\n with fluid.dygraph.guard():\n mlp_load = MLP('mlp')\n optimizer_load2 = SGDOptimizer(\n learning_rate=fluid.layers.natural_exp_decay(\n learning_rate=0.1,\n decay_steps=10000,\n decay_rate=0.5,\n staircase=True))\n parameters, optimizers = fluid.dygraph.load_persistables(\n \"save_dir_2\")\n mlp_load.load_dict(parameters)\n optimizer_load2.load(optimizers)\n self.assertTrue(optimizer2._learning_rate.__dict__ == optimizer_load2._learning_rate.__dict__)\n\n \"\"\"\n if framework.in_dygraph_mode():\n self._learning_rate = stat_dict[self._name]\n else:\n raise 
TypeError(\"load can only be used under DyGraph mode\")\n\n def get_opti_var_name_list(self):\n return self._opti_name_list\n\n def _create_global_learning_rate(self):\n if imperative_base.enabled():\n # create learning rate Variable\n if isinstance(self._learning_rate, float):\n lr = self._global_learning_rate()\n\n if isinstance(lr, framework.Variable):\n return\n else:\n self._learning_rate_map[framework.default_main_program(\n )] = layers.create_global_var(\n name=unique_name.generate(\"learning_rate\"),\n shape=[1],\n value=float(self._learning_rate),\n dtype='float32' if self._dtype is None else self._dtype,\n persistable=True)\n # get learning rate Variable from LearningRateDecay\n elif isinstance(self._learning_rate, LearningRateDecay):\n self._learning_rate_map[framework.default_main_program(\n )] = self._learning_rate()\n else:\n raise TypeError(\n \"optimizer's learning rate must be float or LearningRateDecay\"\n )\n else:\n lr = self._global_learning_rate()\n\n if isinstance(lr, framework.Variable):\n return\n else:\n if not isinstance(self._learning_rate, float):\n raise TypeError(\n \"learning rate variable is create outside optimizer,\"\n \"can not create new learning rate variable for new program\"\n )\n\n # create learning rate in the current main program\n self._learning_rate_map[framework.default_main_program(\n )] = layers.create_global_var(\n name=unique_name.generate(\"learning_rate\"),\n shape=[1],\n value=float(self._learning_rate),\n dtype='float32' if self._dtype is None else self._dtype,\n persistable=True)\n\n def _global_learning_rate(self, program=None):\n \"\"\"\n get global decayed learning rate\n :return:\n \"\"\"\n if program is None:\n program = framework.default_main_program()\n return self._learning_rate_map.get(program, None)\n\n def _append_optimize_op(self, block, param_and_grad):\n \"\"\" append optimize operator to block and return all the added optimize_op\n \"\"\"\n raise NotImplementedError()\n\n def 
_create_param_lr(self, param_and_grad):\n # create learning rate variable for every parameter\n param = param_and_grad[0]\n param_lr = param.optimize_attr['learning_rate']\n if type(param_lr) == Variable:\n return param_lr\n else:\n if param_lr == 1.0:\n return self._global_learning_rate()\n else:\n with default_main_program()._lr_schedule_guard(\n is_with_opt=True), framework.name_scope(\n 'scale_with_param_lr'):\n return self._global_learning_rate() * param_lr\n\n def _create_accumulators(self, block, parameters):\n \"\"\"Create all accumulators needed by the parameters\n\n Args:\n block: the block in which the loss variable is present\n parameters: list of parameter variables for the optimizer\n \"\"\"\n pass\n\n def _finish_update(self, block, parameters_and_grads):\n \"\"\"Finish any custom updates needed\n before completing an optimization step\n\n Args:\n block: the block in which the loss variable is present\n parameters: list of parameter variables for the optimizer\n\n Returns:\n None\n \"\"\"\n pass\n\n def _add_accumulator(self,\n name,\n param,\n dtype=None,\n fill_value=0.0,\n shape=None):\n \"\"\"Utility function to add an accumulator for a parameter\n\n Args:\n block: the block in which the loss variable is present\n name: name of the accumulator\n param: parameter variable for which accumulator is to be added\n dtype: data type of the accumulator variable\n fill_value: value to initialize the accumulator variable\n \"\"\"\n if self._name is not None:\n name = self._name + \"_\" + name\n if (name in self._accumulators and\n param.name in self._accumulators[name]):\n if framework.in_dygraph_mode():\n return self._accumulators[name][param.name]\n raise Exception(\"Accumulator {} already exists for parameter {}\".\n format(name, param.name))\n if shape == None:\n shape = param.shape\n assert isinstance(self.helper, LayerHelper)\n\n var_name = param.name + \"_\" + name\n var_name = unique_name.generate(var_name)\n 
self._opti_name_list.append(var_name)\n\n var = self.helper.create_global_variable(\n name=var_name,\n persistable=True,\n dtype=dtype or param.dtype,\n type=param.type,\n shape=shape)\n self.helper.set_variable_initializer(\n var, initializer=Constant(value=float(fill_value)))\n self._accumulators[name][param.name] = var\n return var\n\n def _get_accumulator(self, name, param):\n \"\"\"Utility function to fetch an accumulator for a parameter\n\n Args:\n name: name of the accumulator\n param: parameter variable for which accumulator is to be fetched\n\n Returns:\n accumulator variable for the parameter\n \"\"\"\n if self._name is not None:\n name = self._name + \"_\" + name\n if (name not in self._accumulators or\n param.name not in self._accumulators[name]):\n raise Exception(\"Accumulator {} does not exist for parameter {}\".\n format(name, param.name))\n return self._accumulators[name][param.name]\n\n def _create_optimization_pass(self, parameters_and_grads):\n \"\"\"Add optimization operators to update gradients to variables.\n\n Args:\n parameters_and_grads(list(tuple(Variable, Variable))):\n a list of (variable, gradient) pair to update.\n\n Returns:\n return_op_list: a list of operators that will complete one step of\n optimization. This will include parameter update ops, global step\n update ops and any other custom ops required by subclasses to manage\n their internal state.\n \"\"\"\n # This is a default implementation of create_optimization_pass that\n # can be shared by most optimizers. This implementation assumes that\n # the subclass will implement the _append_optimize_op method and the\n # _initialize_tensors method. 
The subclass can extend the\n # _create_accumulators method if it needs to create accumulators\n # for parameters and extend _finish_update method to add custom ops.\n\n # Allways called under program_guard use global block as loss block\n global_block = framework.default_main_program().global_block()\n start = len(global_block.ops)\n self.helper = LayerHelper(self.__class__.__name__)\n self._create_accumulators(\n global_block,\n [p[0] for p in parameters_and_grads if p[0].trainable])\n self._create_global_learning_rate()\n\n optimize_ops = []\n if framework.in_dygraph_mode():\n for param_and_grad in parameters_and_grads:\n if param_and_grad[1] is None:\n continue\n with param_and_grad[0].block.program._optimized_guard(\n param_and_grad):\n if param_and_grad[0].trainable is True:\n optimize_op = self._append_optimize_op(global_block,\n param_and_grad)\n optimize_ops.append(optimize_op)\n else:\n for param_and_grad in parameters_and_grads:\n if param_and_grad[1] is None:\n continue\n with param_and_grad[0].block.program._optimized_guard(\n param_and_grad), name_scope(\"optimizer\"):\n if param_and_grad[0].trainable is True:\n optimize_op = self._append_optimize_op(global_block,\n param_and_grad)\n optimize_ops.append(optimize_op)\n\n # Get custom finish ops for subclasses\n # FIXME: Need to fix this once we figure out how to handle dependencies\n self._finish_update(global_block, parameters_and_grads)\n\n end = len(global_block.ops)\n return global_block._slice_ops(start, end)\n\n def _process_distribute_lookuptable(self, param_grads):\n \"\"\"\n Because distribute lookup table only support SGD optimizer for now, not support\n other optimizer and regularization, so we should find the table parameter out,\n and avoid to add regularization and other op for it, and add sgd optimize op\n for it independently.\n :param param_grads(list((Var, Var))): list of (param, grad) pair.\n :param loss: the loss variable.\n :param startup_program: the startup program\n \"\"\"\n 
program = framework.default_main_program()\n global_block = framework.default_main_program().global_block()\n table_name = find_distributed_lookup_table(program)\n table_param = None\n table_grad = None\n new_param_grads = []\n for p, g in param_grads:\n if p.name == table_name:\n if table_param is not None:\n raise RuntimeError(\n \"multi dist table var found, only support one now!\")\n table_param = p\n table_grad = g\n else:\n new_param_grads.append((p, g))\n sgd_op = None\n if table_param is not None:\n param_and_grad = [table_param, table_grad]\n with table_param.block.program._optimized_guard(param_and_grad), \\\n framework.name_scope(\"optimizer\"):\n self._create_global_learning_rate()\n # create the optimize op\n sgd_op = global_block.append_op(\n type='sgd',\n inputs={\n \"Param\": table_param,\n \"Grad\": table_grad,\n \"LearningRate\": self._create_param_lr(param_and_grad)\n },\n outputs={\"ParamOut\": param_and_grad[0]})\n return new_param_grads, (table_param, table_grad), sgd_op\n\n def _append_dgc_ops(self, param_and_grad):\n pass\n\n def backward(self,\n loss,\n startup_program=None,\n parameter_list=None,\n no_grad_set=None,\n callbacks=None):\n \"\"\"\n First part of `minimize`, do auto-diff to append backward ops for\n the current program.\n\n Args:\n loss (Variable): loss variable to run optimizations.\n startup_program (Program): startup_program for initializing parameters\n in `parameter_list`.\n parameter_list (list): list of Variables to update.\n no_grad_set (set|None): set of Variables should be ignored.\n callbacks (list|None): list of callables to run when appending backward\n operator for one parameter.\n\n Return:\n list: list of (param, grad) pair, grad is the output of backward.\n\n Examples:\n See examples in `apply_gradients`.\n \"\"\"\n no_grad_set = self._get_no_grad_set(loss, no_grad_set)\n\n self._dtype = loss.dtype\n if framework.in_dygraph_mode():\n if parameter_list is not None:\n parameters = parameter_list\n else:\n 
parameters = framework._dygraph_tracer().all_parameters()\n\n params_grads = []\n for param in parameters:\n if not param.trainable:\n continue\n if param._ivar._grad_ivar() is not None:\n # create gradient variable\n grad_var = Variable(\n block=loss.block,\n name=param._ivar._grad_name(),\n stop_gradient=True,\n ivar=param._ivar._grad_ivar())\n params_grads.append((param, grad_var))\n else:\n if callbacks is None:\n callbacks = [error_clip_callback]\n else:\n assert (isinstance(callbacks, list))\n program = loss.block.program\n with program_guard(program, startup_program):\n params_grads = append_backward(loss, parameter_list,\n no_grad_set, callbacks)\n # Note: since we can't use all_reduce_op now,\n # dgc_op should be the last op of one grad.\n self._append_dgc_ops(params_grads)\n return params_grads\n\n def apply_gradients(self, params_grads):\n \"\"\"\n Second part of `minimize`, appending optimization operators for\n given `params_grads` pairs.\n\n Args:\n params_grads (list): list of (param, grad) pair to do optimization.\n\n Returns:\n list: A list of operators appended to the current program.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n loss = network()\n optimizer = fluid.optimizer.SGD(learning_rate=0.1)\n params_grads = optimizer.backward(loss)\n # you may append operations for params_grads here\n # ...\n optimizer.apply_gradients(params_grads)\n \"\"\"\n params_grads = sorted(params_grads, key=lambda x: x[0].name)\n\n params_grads, table_param_and_grad, table_optimize_op = \\\n self._process_distribute_lookuptable(params_grads)\n\n params_grads = append_gradient_clip_ops(params_grads)\n\n # Add regularization if any\n params_grads = append_regularization_ops(params_grads,\n self.regularization)\n\n optimize_ops = self._create_optimization_pass(params_grads)\n if table_optimize_op is not None:\n optimize_ops.append(table_optimize_op)\n params_grads.append(table_param_and_grad)\n\n return optimize_ops\n\n def apply_optimize(self, loss, startup_program, params_grads):\n \"\"\"\n Second part of `minimize`, appending optimization operators for\n given `params_grads` pairs.\n\n Args:\n loss (Variable): loss variable to run optimizations.\n startup_program (Program): startup_program for initializing parameters\n in `parameter_list`.\n params_grads (list): list of (param, grad) pair to do optimization.\n\n Returns:\n list: A list of operators appended to the current program.\n \"\"\"\n if framework.in_dygraph_mode():\n with program_guard(framework.default_main_program(),\n framework.default_startup_program()):\n params_grads = append_regularization_ops(params_grads,\n self.regularization)\n optimize_ops = self._create_optimization_pass(params_grads)\n else:\n program = loss.block.program\n with program_guard(program, startup_program):\n optimize_ops = self.apply_gradients(params_grads)\n return optimize_ops\n\n def _get_no_grad_set(self, loss, no_grad_set=None):\n if no_grad_set is None:\n no_grad_set = set()\n elif isinstance(no_grad_set, set) or isinstance(\n no_grad_set, list) or isinstance(no_grad_set, tuple):\n no_grad_set = set(no_grad_set)\n 
else:\n assert \"no_grad_set should be a set, but the passed type is {}\".format(\n type(no_grad_set))\n parameters = loss.block.program.global_block().all_parameters()\n param_no_trainable = set(\n [param.name for param in parameters if param.trainable is False])\n # If the parameter is no trainable, it should not have a gradient.\n no_grad_set.update(param_no_trainable)\n\n return no_grad_set\n\n @imperative_base.no_grad\n def minimize(self,\n loss,\n startup_program=None,\n parameter_list=None,\n no_grad_set=None,\n grad_clip=None):\n \"\"\"\n Add operations to minimize `loss` by updating `parameter_list`.\n\n This method combines interface `backward()` and\n `apply_gradients()` into one.\n\n Args:\n loss (Variable): loss variable to run optimizations.\n startup_program (Program): startup_program for initializing parameters\n in `parameter_list`.\n parameter_list (list): list of Variables to update.\n no_grad_set (set|None): set of Variables should be ignored.\n grad_clip (GradClipBase|None) : Gradient clip strategy\n\n Returns:\n tuple: (optimize_ops, params_grads) which are, list of operators appended;\n and list of (param, grad) Variables pair for optimization.\n \"\"\"\n assert isinstance(loss, Variable), \"The loss should be an Variable.\"\n params_grads = self.backward(\n loss,\n startup_program=startup_program,\n parameter_list=parameter_list,\n no_grad_set=no_grad_set)\n\n if grad_clip is not None and framework.in_dygraph_mode():\n # TODO(hongyu): FIX later, this is only for dygraph, should be work for static mode\n params_grads = grad_clip(params_grads)\n\n optimize_ops = self.apply_optimize(\n loss, startup_program=startup_program, params_grads=params_grads)\n\n return optimize_ops, params_grads\n\n\nclass SGDOptimizer(Optimizer):\n \"\"\"\n Optimizer of the stochastic gradient descent algorithm.\n\n .. math::\n\n param\\_out = param - learning\\_rate * grad\n\n Args:\n learning_rate (float|Variable): the learning rate used to update parameters. 
\\\n Can be a float value or a Variable with one float value as data element.\n regularization: A Regularizer, such as\n fluid.regularizer.L2DecayRegularizer.\n name: A optional name prefix.\n\n Examples:\n .. code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n import numpy as np\n\n place = fluid.CPUPlace()\n main = fluid.Program()\n with fluid.program_guard(main):\n x = fluid.layers.data(name='x', shape=[13], dtype='float32')\n y = fluid.layers.data(name='y', shape=[1], dtype='float32')\n y_predict = fluid.layers.fc(input=x, size=1, act=None)\n cost = fluid.layers.square_error_cost(input=y_predict, label=y)\n avg_cost = fluid.layers.mean(cost)\n\n sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)\n sgd_optimizer.minimize(avg_cost)\n\n fetch_list = [avg_cost]\n train_reader = paddle.batch(\n paddle.dataset.uci_housing.train(), batch_size=1)\n feeder = fluid.DataFeeder(place=place, feed_list=[x, y])\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for data in train_reader():\n exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)\n\n \"\"\"\n\n def __init__(self, learning_rate, regularization=None, name=None):\n assert learning_rate is not None\n super(SGDOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n name=name)\n self.type = \"sgd\"\n\n def _append_optimize_op(self, block, param_and_grad):\n assert isinstance(block, framework.Block)\n\n # create the optimize op\n sgd_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": param_and_grad[0],\n \"Grad\": param_and_grad[1],\n \"LearningRate\": self._create_param_lr(param_and_grad)\n },\n outputs={\"ParamOut\": param_and_grad[0]},\n stop_gradient=True)\n\n return sgd_op\n\n\nclass MomentumOptimizer(Optimizer):\n \"\"\"\n\n Simple Momentum optimizer with velocity state\n\n This optimizer has a flag for Nestrov Momentum.\n\n The update equations are as follows:\n\n .. 
math::\n\n & velocity = mu * velocity + gradient\n\n & if (use\\_nesterov):\n\n &\\quad param = param - (gradient + mu * velocity) * learning\\_rate\n\n & else:\n\n &\\quad param = param - learning\\_rate * velocity\n\n Args:\n learning_rate (float|Variable): the learning rate used to update parameters. \\\n Can be a float value or a Variable with one float value as data element.\n momentum (float): momentum factor\n use_nesterov (bool): enables Nesterov momentum\n regularization: A Regularizer, such as\n fluid.regularizer.L2DecayRegularizer.\n name: A optional name prefix.\n\n Examples:\n .. code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n import numpy as np\n\n place = fluid.CPUPlace()\n main = fluid.Program()\n with fluid.program_guard(main):\n x = fluid.layers.data(name='x', shape=[13], dtype='float32')\n y = fluid.layers.data(name='y', shape=[1], dtype='float32')\n y_predict = fluid.layers.fc(input=x, size=1, act=None)\n cost = fluid.layers.square_error_cost(input=y_predict, label=y)\n avg_cost = fluid.layers.mean(cost)\n\n moment_optimizer = fluid.optimizer.MomentumOptimizer(learning_rate=0.001, momentum=0.9)\n moment_optimizer.minimize(avg_cost)\n\n fetch_list = [avg_cost]\n train_reader = paddle.batch(\n paddle.dataset.uci_housing.train(), batch_size=1)\n feeder = fluid.DataFeeder(place=place, feed_list=[x, y])\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for data in train_reader():\n exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)\n\n \"\"\"\n _velocity_acc_str = \"velocity\"\n\n def __init__(self,\n learning_rate,\n momentum,\n use_nesterov=False,\n regularization=None,\n name=None):\n assert learning_rate is not None\n assert momentum is not None\n super(MomentumOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n name=name)\n self.type = \"momentum\"\n self._momentum = momentum\n self._use_nesterov = bool(use_nesterov)\n\n def 
_create_accumulators(self, block, parameters):\n assert isinstance(block, framework.Block)\n\n for p in parameters:\n self._add_accumulator(self._velocity_acc_str, p)\n\n def _append_optimize_op(self, block, param_and_grad):\n assert isinstance(block, framework.Block)\n\n velocity_acc = self._get_accumulator(self._velocity_acc_str,\n param_and_grad[0])\n # create the momentum optimize op\n momentum_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": param_and_grad[0],\n \"Grad\": param_and_grad[1],\n \"Velocity\": velocity_acc,\n \"LearningRate\": self._create_param_lr(param_and_grad)\n },\n outputs={\n \"ParamOut\": param_and_grad[0],\n \"VelocityOut\": velocity_acc\n },\n attrs={\"mu\": self._momentum,\n \"use_nesterov\": self._use_nesterov},\n stop_gradient=True)\n\n return momentum_op\n\n\nclass DGCMomentumOptimizer(MomentumOptimizer):\n \"\"\"\n\n Original paper is https://arxiv.org/abs/1712.01887\n\n DGC reduces the communication bandwidth by sending only the important gradients (sparse update):\\\n only gradients larger than a threshold are transmitted.\n\n To avoid losing information, DGC accumulates the rest of the gradients locally.\n\n Eventually, these gradients become large enough to be transmitted.\n\n Thus, DGC sends the large gradients immediately but eventually send all of the gradients over time.\n\n To ensure no loss of accuracy, DGC employs momentum correction and local gradient clipping on top of the gradient sparsification to maintain model performance.\n\n DGC also uses momentum factor masking and warmup training to overcome the staleness problem caused by reduced communication.\n\n This optimizer will do two things:\n\n 1. Compress the gradient by get TopK import value from tensor \\\n and use it for allreduce to reduce network bandwidth.\n\n 2. Call momentum to optimize on the cost.\n\n Args:\n learning_rate (float|Variable): the learning rate used to update parameters. 
\\\n Can be a float value or a Variable with one float value as data element.\n momentum (float): Momentum factor.\n rampup_begin_step (int): The beginning step from which gradient compression is implemented.\n rampup_step (int): How long it use the sparsity periods. Default is 1.\n for example: If the sparsity is [0.75, 0.9375, 0.984375, 0.996, 0.999], and the rampup_step is 5, \\\n it will use 0.75 at 0 step, and 0.9375 at 1 step, and so on. And when reach sparsity array ends, \\\n it will use 0.999 then and after.\n sparsity (list[float]): Get top important element from gradient tensor, the ratio is (1 - current sparsity).\n use_nesterov (bool): Enables Nesterov momentum. True means use nesterov.\n local_grad_clip_norm (float): Clip norm value if needed.\n num_trainers: The number of training nodes.\n regularization: A Regularizer, such as fluid.regularizer.L2DecayRegularizer.\n name: An optional name prefix.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n optimizer = fluid.optimizer.DGCMomentumOptimizer(\n learning_rate=0.0001,\n momentum=0.9,\n rampup_step=1000,\n rampup_begin_step=1252,\n sparsity=[0.999, 0.999])\n\n \"\"\"\n\n def __init__(self,\n learning_rate,\n momentum,\n rampup_begin_step,\n rampup_step=1,\n sparsity=[0.999],\n use_nesterov=False,\n local_grad_clip_norm=None,\n num_trainers=None,\n regularization=None,\n name=None):\n self._sparsity = sparsity\n self._rampup_step = rampup_step\n self._rampup_step_var = None\n\n self._rampup_begin_step = rampup_begin_step\n self._rampup_begin_step_var = None\n\n self._global_step_var = None\n self._local_grad_clip_norm = None\n self._clip_norm = None\n\n if local_grad_clip_norm is not None:\n assert isinstance(num_trainers, int)\n assert isinstance(local_grad_clip_norm, float)\n assert num_trainers > 0\n\n self._local_grad_clip_norm = local_grad_clip_norm\n self._num_trainers = num_trainers\n self._clip_norm = local_grad_clip_norm / (num_trainers *\n num_trainers)\n\n 
super(DGCMomentumOptimizer, self).__init__(\n learning_rate, momentum, use_nesterov, regularization, name)\n\n core.init_dgc()\n\n def _add_auto_increment_var(self, counter_name, begin, step=1):\n helper = LayerHelper('global_step_counter')\n counter, is_new_var = helper.create_or_get_global_variable(\n name=counter_name, dtype='float32', shape=[1], persistable=True)\n if is_new_var:\n helper.set_variable_initializer(\n counter,\n initializer=Constant(\n value=float(begin - 1), force_cpu=True))\n helper.main_program.global_block()._prepend_op(\n type='increment',\n inputs={'X': [counter]},\n outputs={'Out': [counter]},\n attrs={'step': float(step)},\n stop_gradient=True)\n counter.stop_gradient = True\n\n return counter\n\n def _append_dgc_ops(self, param_and_grads):\n start_program = default_startup_program()\n main_program = default_main_program()\n main_program._enable_dgc = True\n\n # step counter\n self._global_step_var = self._add_auto_increment_var(\n counter_name=core.dgc.kDGCCounterName(), begin=0)\n\n # rampup begin step var for all_reduce_op_handle\n self._rampup_begin_step_var = tensor.create_global_var(\n shape=[1],\n dtype=core.VarDesc.VarType.FP32,\n persistable=True,\n name=core.dgc.kDGCRampUpBeginStepName(),\n value=self._rampup_begin_step * 1.0,\n force_cpu=True)\n\n for param_var, grad_var in param_and_grads:\n var_numel = abs(reduce(lambda x, y: x * y, param_var.shape))\n if var_numel < 16384 or \\\n param_var.type == core.VarDesc.VarType.SELECTED_ROWS or \\\n grad_var.type == core.VarDesc.VarType.SELECTED_ROWS or \\\n param_var.dtype != core.VarDesc.VarType.FP32 :\n continue\n\n u_var = tensor.create_global_var(\n shape=param_var.shape,\n dtype=param_var.dtype,\n persistable=True,\n name=param_var.name + core.dgc.kDGCUName(),\n value=0.0)\n v_var = tensor.create_global_var(\n shape=param_var.shape,\n dtype=param_var.dtype,\n persistable=True,\n name=param_var.name + core.dgc.kDGCVName(),\n value=0.0)\n\n k_var = tensor.create_global_var(\n 
shape=[1],\n dtype=param_var.dtype,\n persistable=True,\n name=param_var.name + core.dgc.kDGCKName(),\n value=0.0,\n force_cpu=True)\n\n encoded_var = tensor.create_global_var(\n shape=[1],\n dtype=param_var.dtype,\n persistable=True,\n name=param_var.name + core.dgc.kDGCEncodedName(),\n value=0.0,\n force_cpu=False)\n\n # del back oprolevarname\n op_maker = core.op_proto_and_checker_maker\n backward = core.op_proto_and_checker_maker.OpRole.Backward\n for op in main_program.global_block().ops:\n if not self._is_the_backward_op(op):\n continue\n\n var_attr = op.all_attrs()[op_maker.kOpRoleVarAttrName()]\n if param_var.name not in var_attr:\n continue\n\n var_attr.remove(param_var.name)\n var_attr.remove(grad_var.name)\n if len(var_attr) > 1:\n op._set_attr(op_maker.kOpRoleVarAttrName(), var_attr)\n else:\n op._remove_attr(op_maker.kOpRoleVarAttrName())\n\n clip_var = grad_var\n if self._local_grad_clip_norm is not None:\n clip_var = self._append_clip_norm(grad_var, self._clip_norm)\n self._dgc_op(param_var, clip_var, grad_var, u_var, v_var, k_var,\n encoded_var)\n\n def _is_the_backward_op(self, op):\n op_maker = core.op_proto_and_checker_maker\n backward = core.op_proto_and_checker_maker.OpRole.Backward\n if op_maker.kOpRoleVarAttrName() in op.attr_names and \\\n int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(backward):\n return True\n return False\n\n def _clip_by_norm(self, x, max_norm, name=None):\n args = {'x': x, 'max_norm': max_norm, 'name': name}\n\n helper = LayerHelper(\"dgc_clip_by_norm_op\", **args)\n\n if name is None:\n name = unique_name.generate_with_ignorable_key(\".\".join(\n [helper.name, 'tmp']))\n\n out = helper.create_variable(\n type=x.type, name=name, dtype=x.dtype, persistable=False)\n\n helper.append_op(\n type=\"dgc_clip_by_norm\",\n inputs={\"X\": x,\n \"current_step\": self._global_step_var},\n attrs={\n \"max_norm\": max_norm,\n \"rampup_begin_step\": float(self._rampup_begin_step)\n },\n outputs={\"Out\": out})\n return out\n\n 
def _append_clip_norm(self, grad_var, clip_norm):\n with grad_var.block.program._backward_role_guard():\n return self._clip_by_norm(\n x=grad_var, max_norm=clip_norm, name=grad_var.name)\n\n def _dgc_op(self, param_var, clip_var, grad_var, u_var, v_var, k_var,\n encoded_var):\n block = framework.default_main_program().global_block()\n op_maker = core.op_proto_and_checker_maker\n dgc_op = block.append_op(\n type=\"dgc\",\n inputs={\n \"U\": u_var,\n \"V\": v_var,\n \"Grad\": clip_var,\n \"current_step\": self._global_step_var\n },\n outputs={\n \"U_out\": u_var,\n \"V_out\": v_var,\n \"EncodeGrad\": encoded_var,\n \"k\": k_var,\n \"Grad_out\": grad_var\n },\n attrs={\n \"m\": self._momentum,\n \"sparsity\": self._sparsity,\n \"use_nesterov\": self._use_nesterov,\n \"rampup_begin_step\": float(self._rampup_begin_step),\n \"rampup_step\": float(self._rampup_step)\n },\n stop_gradient=True)\n\n backward = op_maker.OpRole.Backward\n dgc_op._set_attr(op_maker.kOpRoleAttrName(), backward)\n dgc_op._set_attr(op_maker.kOpRoleVarAttrName(),\n [param_var.name, grad_var.name])\n\n\nclass LarsMomentumOptimizer(Optimizer):\n \"\"\"\n Momentum optimizer with LARS support\n\n The update equations are as follows:\n\n .. math::\n\n & local\\_learning\\_rate = learning\\_rate * lars\\_coeff * \\\\\n \\\\frac{||param||}{||gradient|| + lars\\_weight\\_decay * ||param||}\n\n & velocity = mu * velocity + local\\_learning\\_rate * (gradient + lars\\_weight\\_decay * param)\n\n & param = param - velocity\n\n Args:\n learning_rate (float|Variable): the learning rate used to update parameters. 
\\\n Can be a float value or a Variable with one float value as data element.\n momentum (float): momentum factor\n lars_coeff (float): defines how much we trust the layer to change its weights.\n lars_weight_decay (float): weight decay coefficient for decaying using LARS.\n regularization: A Regularizer, such as\n fluid.regularizer.L2DecayRegularizer.\n name: A optional name prefix.\n\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(inp, size=3)\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.LarsMomentumOptimizer(learning_rate=0.001, momentum=0.9)\n optimizer.minimize(out)\n\n exe = fluid.Executor(fluid.CPUPlace())\n exe.run(fluid.default_startup_program())\n exe.run(\n feed={\"inp\": np_inp},\n fetch_list=[out.name])\n \"\"\"\n _velocity_acc_str = \"velocity\"\n\n def __init__(self,\n learning_rate,\n momentum,\n lars_coeff=0.001,\n lars_weight_decay=0.0005,\n regularization=None,\n name=None):\n assert learning_rate is not None\n assert momentum is not None\n super(LarsMomentumOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n name=name)\n self.type = \"lars_momentum\"\n self._momentum = momentum\n self._lars_coeff = float(lars_coeff)\n self._lars_weight_decay = float(lars_weight_decay)\n\n def _create_accumulators(self, block, parameters):\n assert isinstance(block, framework.Block)\n\n for p in parameters:\n self._add_accumulator(self._velocity_acc_str, p)\n\n def _append_optimize_op(self, block, param_and_grad):\n assert isinstance(block, framework.Block)\n\n velocity_acc = self._get_accumulator(self._velocity_acc_str,\n param_and_grad[0])\n # create the momentum optimize op\n momentum_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": param_and_grad[0],\n \"Grad\": 
param_and_grad[1],\n \"Velocity\": velocity_acc,\n \"LearningRate\": self._create_param_lr(param_and_grad)\n },\n outputs={\n \"ParamOut\": param_and_grad[0],\n \"VelocityOut\": velocity_acc\n },\n attrs={\n \"mu\": self._momentum,\n \"lars_coeff\": self._lars_coeff,\n \"lars_weight_decay\": self._lars_weight_decay\n },\n stop_gradient=True)\n\n return momentum_op\n\n\nclass AdagradOptimizer(Optimizer):\n \"\"\"\n **Adaptive Gradient Algorithm (Adagrad)**\n\n The update is done as follows:\n\n .. math::\n\n moment\\_out &= moment + grad * grad\n\n param\\_out &= param - \\\\frac{learning\\_rate * grad}{\\sqrt{moment\\_out} + \\epsilon}\n\n The original paper(http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)\n does not have the epsilon attribute. It is added here in our implementation\n as also proposed here: http://cs231n.github.io/neural-networks-3/#ada\n for numerical stability to avoid the division by zero error.\n\n Args:\n learning_rate (float|Variable): the learning rate used to update parameters. \\\n Can be a float value or a Variable with one float value as data element.\n epsilon (float): a small float value for numerical stability.\n regularization: A Regularizer, such as\n fluid.regularizer.L2DecayRegularizer.\n name: A optional name prefix.\n initial_accumulator_value (float): Initial value for moment accumulator.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n import numpy as np\n\n np_inp = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)\n inp = fluid.layers.data(\n name=\"inp\", shape=[2, 2], append_batch_size=False)\n out = fluid.layers.fc(inp, size=3)\n out = fluid.layers.reduce_sum(out)\n optimizer = fluid.optimizer.Adagrad(learning_rate=0.2)\n optimizer.minimize(out)\n\n exe = fluid.Executor(fluid.CPUPlace())\n exe.run(fluid.default_startup_program())\n exe.run(\n feed={\"inp\": np_inp},\n fetch_list=[out.name])\n \"\"\"\n _moment_acc_str = \"moment\"\n\n def __init__(self,\n learning_rate,\n epsilon=1.0e-6,\n regularization=None,\n name=None,\n initial_accumulator_value=0.0):\n assert learning_rate is not None\n assert epsilon is not None\n super(AdagradOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n name=name)\n self.type = \"adagrad\"\n self._epsilon = epsilon\n self.initial_accumulator_value = initial_accumulator_value\n\n def _create_accumulators(self, block, parameters):\n assert isinstance(block, framework.Block)\n\n for p in parameters:\n self._add_accumulator(self._moment_acc_str, p)\n\n def _append_optimize_op(self, block, param_and_grad):\n assert isinstance(block, framework.Block)\n\n moment_acc = self._get_accumulator(self._moment_acc_str,\n param_and_grad[0])\n startup_block = framework.default_startup_program().global_block()\n startup_block.append_op(\n type='fill_constant',\n inputs={},\n outputs={'Out': [moment_acc]},\n attrs={\n 'dtype': moment_acc.dtype,\n 'value': self.initial_accumulator_value,\n 'shape': moment_acc.shape,\n })\n\n # Create the adagrad optimizer op\n adagrad_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": param_and_grad[0],\n \"Grad\": param_and_grad[1],\n \"Moment\": moment_acc,\n \"LearningRate\": self._create_param_lr(param_and_grad)\n },\n outputs={\"ParamOut\": param_and_grad[0],\n \"MomentOut\": moment_acc},\n attrs={\"epsilon\": self._epsilon},\n 
stop_gradient=True)\n\n return adagrad_op\n\n\nclass AdamOptimizer(Optimizer):\n \"\"\"\n This implements the Adam optimizer from Section 2 of the Adam\n paper : https://arxiv.org/abs/1412.6980.\n Adam is a first-order gradient-based optimization method based on\n adaptive estimates of lower-order moments.\n\n Adam updates:\n\n .. math::\n\n t & = t + 1\n\n moment\\_1\\_out & = {\\\\beta}_1 * moment\\_1 + (1 - {\\\\beta}_1) * grad\n\n moment\\_2\\_out & = {\\\\beta}_2 * moment\\_2 + (1 - {\\\\beta}_2) * grad * grad\n\n learning\\_rate & = learning\\_rate * \\\\\n \\\\frac{\\sqrt{1 - {\\\\beta}_2^t}}{1 - {\\\\beta}_1^t}\n\n param\\_out & = param - learning\\_rate * \\\\frac{moment\\_1}{\\sqrt{moment\\_2} + \\epsilon}\n\n Args:\n learning_rate (float|Variable): the learning rate used to update parameters. \\\n Can be a float value or a Variable with one float value as data element.\n beta1 (float): The exponential decay rate for the 1st moment estimates.\n beta2 (float): The exponential decay rate for the 2nd moment estimates.\n epsilon (float): a small float value for numerical stability.\n regularization: A Regularizer, such as fluid.regularizer.L2DecayRegularizer.\n name: An optional name prefix.\n lazy_mode (bool, optional, default False): The official Adam algorithm has two moving-average accumulators;\n the accumulators are updated at every step. Every element of the two moving-averages is updated\n in both dense mode and sparse mode. If the size of a parameter is very large, then the update\n may be very slow. The lazy mode only updates the elements that have gradients in the current\n mini-batch, so it will be much faster. But this mode has different semantics from the\n original Adam algorithm and may lead to different results.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n\n place = fluid.CPUPlace()\n main = fluid.Program()\n with fluid.program_guard(main):\n x = fluid.layers.data(name='x', shape=[13], dtype='float32')\n y = fluid.layers.data(name='y', shape=[1], dtype='float32')\n y_predict = fluid.layers.fc(input=x, size=1, act=None)\n cost = fluid.layers.square_error_cost(input=y_predict, label=y)\n avg_cost = fluid.layers.mean(cost)\n\n adam_optimizer = fluid.optimizer.AdamOptimizer(0.01)\n adam_optimizer.minimize(avg_cost)\n\n fetch_list = [avg_cost]\n train_reader = paddle.batch(\n paddle.dataset.uci_housing.train(), batch_size=1)\n feeder = fluid.DataFeeder(place=place, feed_list=[x, y])\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for data in train_reader():\n exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)\n\n \"\"\"\n _moment1_acc_str = \"moment1\"\n _moment2_acc_str = \"moment2\"\n _beta1_pow_acc_str = \"beta1_pow_acc\"\n _beta2_pow_acc_str = \"beta2_pow_acc\"\n\n def __init__(self,\n learning_rate=0.001,\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-8,\n regularization=None,\n name=None,\n lazy_mode=False):\n assert learning_rate is not None\n assert beta1 is not None\n assert beta2 is not None\n assert epsilon is not None\n super(AdamOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n name=name)\n self.type = \"adam\"\n self._beta1 = beta1\n self._beta2 = beta2\n self._epsilon = epsilon\n self._lazy_mode = lazy_mode\n\n def _create_accumulators(self, block, parameters):\n assert isinstance(block, framework.Block)\n\n # Create accumulator tensors for first and second moments\n for p in parameters:\n self._add_accumulator(self._moment1_acc_str, p)\n self._add_accumulator(self._moment2_acc_str, p)\n self._add_accumulator(\n name=self._beta1_pow_acc_str,\n param=p,\n dtype='float32',\n fill_value=self._beta1,\n shape=[1])\n self._add_accumulator(\n 
name=self._beta2_pow_acc_str,\n param=p,\n dtype='float32',\n fill_value=self._beta2,\n shape=[1])\n\n def _append_optimize_op(self, block, param_and_grad):\n assert isinstance(block, framework.Block)\n\n moment1 = self._get_accumulator(self._moment1_acc_str,\n param_and_grad[0])\n moment2 = self._get_accumulator(self._moment2_acc_str,\n param_and_grad[0])\n beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,\n param_and_grad[0])\n beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,\n param_and_grad[0])\n\n # create the adam optimize op\n adam_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": param_and_grad[0],\n \"Grad\": param_and_grad[1],\n \"LearningRate\": self._create_param_lr(param_and_grad),\n \"Moment1\": moment1,\n \"Moment2\": moment2,\n \"Beta1Pow\": beta1_pow_acc,\n \"Beta2Pow\": beta2_pow_acc\n },\n outputs={\n \"ParamOut\": param_and_grad[0],\n \"Moment1Out\": moment1,\n \"Moment2Out\": moment2\n },\n attrs={\n \"beta1\": self._beta1,\n \"beta2\": self._beta2,\n \"epsilon\": self._epsilon,\n \"lazy_mode\": self._lazy_mode,\n \"min_row_size_to_use_multithread\": 1000\n },\n stop_gradient=True)\n\n return adam_op\n\n def _finish_update(self, block, param_and_grads):\n \"\"\"Update Beta1 and Beta2 Power accumulators\n \"\"\"\n assert isinstance(block, framework.Block)\n main_block = block.program.global_block()\n for param, grad in param_and_grads:\n if grad is None or param.trainable is False:\n continue\n with param.block.program._optimized_guard(\n [param, grad]), name_scope(\"optimizer\"):\n beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,\n param)\n beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,\n param)\n main_block.append_op(\n type=\"scale\",\n inputs={\"X\": beta1_pow_acc},\n outputs={\"Out\": beta1_pow_acc},\n attrs={\"scale\": self._beta1},\n stop_gradient=True)\n\n main_block.append_op(\n type=\"scale\",\n inputs={\"X\": beta2_pow_acc},\n outputs={\"Out\": beta2_pow_acc},\n 
attrs={\"scale\": self._beta2},\n stop_gradient=True)\n\n\nclass AdamaxOptimizer(Optimizer):\n \"\"\"\n We implement the Adamax optimizer from Section 7 of the Adam\n paper: https://arxiv.org/abs/1412.6980. Adamax is a variant of the\n Adam algorithm based on the infinity norm.\n\n Adamax updates:\n\n .. math::\n\n t & = t + 1\n\n moment\\_out & = {\\\\beta}_1 * moment + (1 - {\\\\beta}_1) * grad\n\n inf\\_norm\\_out & = max({\\\\beta}_2 * inf\\_norm + \\epsilon, |grad|)\n\n learning\\_rate & = \\\\frac{learning\\_rate}{1 - {\\\\beta}_1^t}\n\n param\\_out & = param - learning\\_rate * \\\\frac{moment\\_out}{inf\\_norm\\_out}\n\n\n The original paper does not have an epsilon attribute.\n However, it is added here for numerical stability to prevent the\n division by 0 error.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n # First create the Executor.\n place = fluid.CPUPlace() # fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.program_guard(train_program, startup_program):\n data = fluid.layers.data(name='X', shape=[1], dtype='float32')\n hidden = fluid.layers.fc(input=data, size=10)\n loss = fluid.layers.mean(hidden)\n adam = fluid.optimizer.Adamax(learning_rate=0.2)\n adam.minimize(loss)\n\n # Run the startup program once and only once.\n exe.run(startup_program)\n\n x = numpy.random.random(size=(10, 1)).astype('float32')\n outs = exe.run(program=train_program,\n feed={'X': x},\n fetch_list=[loss.name])\n\n Args:\n learning_rate (float|Variable): the learning rate used to update parameters. 
\\\n Can be a float value or a Variable with one float value as data element.\n beta1 (float): The exponential decay rate for the 1st moment estimates.\n beta2 (float): The exponential decay rate for the 2nd moment estimates.\n epsilon (float): a small float value for numerical stability.\n regularization: A Regularizer, such as\n fluid.regularizer.L2DecayRegularizer.\n name: A optional name prefix.\n\n Notes:\n Currently, AdamaxOptimizer doesn't support sparse parameter optimization.\n \"\"\"\n _moment_acc_str = \"moment\"\n _inf_norm_acc_str = \"inf_norm\"\n _beta1_pow_acc_str = \"beta1_pow_acc\"\n\n def __init__(self,\n learning_rate=0.001,\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-8,\n regularization=None,\n name=None):\n assert learning_rate is not None\n assert beta1 is not None\n assert beta2 is not None\n assert epsilon is not None\n super(AdamaxOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n name=name)\n self.type = \"adamax\"\n self._beta1 = beta1\n self._beta2 = beta2\n self._epsilon = epsilon\n\n def _create_accumulators(self, block, parameters):\n # Create accumulator tensors for first moment and infinity norm\n for p in parameters:\n self._add_accumulator(self._moment_acc_str, p)\n self._add_accumulator(self._inf_norm_acc_str, p)\n self._add_accumulator(\n name=self._beta1_pow_acc_str,\n param=p,\n dtype='float32',\n fill_value=self._beta1,\n shape=[1])\n\n def _append_optimize_op(self, block, param_and_grad):\n assert isinstance(block, framework.Block)\n\n moment = self._get_accumulator(self._moment_acc_str, param_and_grad[0])\n inf_norm = self._get_accumulator(self._inf_norm_acc_str,\n param_and_grad[0])\n beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,\n param_and_grad[0])\n # create the adamax optimize op\n adamax_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": param_and_grad[0],\n \"Grad\": param_and_grad[1],\n \"LearningRate\": self._create_param_lr(param_and_grad),\n 
\"Moment\": moment,\n \"InfNorm\": inf_norm,\n \"Beta1Pow\": beta1_pow_acc\n },\n outputs={\n \"ParamOut\": param_and_grad[0],\n \"MomentOut\": moment,\n \"InfNormOut\": inf_norm\n },\n attrs={\n \"beta1\": self._beta1,\n \"beta2\": self._beta2,\n \"epsilon\": self._epsilon\n },\n stop_gradient=True)\n\n return adamax_op\n\n def _finish_update(self, block, parameters_and_grads):\n \"\"\"Update Beta1 Power accumulator\n \"\"\"\n assert isinstance(block, framework.Block)\n main_block = block.program.global_block()\n for param, grad in parameters_and_grads:\n if grad is None or param.trainable is False:\n continue\n with param.block.program._optimized_guard(\n [param, grad]), name_scope('adamx'):\n beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,\n param)\n main_block.append_op(\n type=\"scale\",\n inputs={\"X\": beta1_pow_acc},\n outputs={\"Out\": beta1_pow_acc},\n attrs={\"scale\": self._beta1},\n stop_gradient=True)\n\n\nclass DecayedAdagradOptimizer(Optimizer):\n \"\"\"\n **Decayed Adagrad Optimizer**\n\n The original paper(http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)\n\n The update is done as follows:\n\n .. math::\n\n moment\\_out & = decay * moment + (1 - decay) * grad * grad\n\n param\\_out & = param - \\\\frac{learning\\_rate * grad}{\\sqrt{moment\\_out} + \\epsilon}\n\n The original paper(http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf)\n does not have an epsilon attribute. It is added here for numerical\n stability to avoid the division by zero error.\n\n Args:\n learning_rate (float|Variable): the learning rate used to update parameters. \\\n Can be a float value or a Variable with one float value as data element.\n decay (float): decay rate.\n epsilon (float): a small float value for numerical stability.\n regularization: A Regularizer, such as\n fluid.regularizer.L2DecayRegularizer.\n name: A optional name prefix.\n\n Examples:\n .. 
code-block:: python\n\n import paddle.fluid as fluid\n import paddle.fluid.layers as layers\n from paddle.fluid.optimizer import DecayedAdagrad\n\n x = layers.data( name='x', shape=[-1, 10], dtype='float32' )\n trans = layers.fc( x, 100 )\n cost = layers.reduce_mean( trans )\n optimizer = fluid.optimizer.DecayedAdagrad(learning_rate=0.2)\n optimizer.minimize(cost)\n\n Notes:\n Currently, DecayedAdagradOptimizer doesn't support sparse parameter optimization.\n \"\"\"\n _moment_acc_str = \"moment\"\n\n def __init__(self,\n learning_rate,\n decay=0.95,\n epsilon=1.0e-6,\n regularization=None,\n name=None):\n assert learning_rate is not None\n assert decay is not None\n assert epsilon is not None\n\n super(DecayedAdagradOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n name=name)\n self.type = \"decayed_adagrad\"\n self._decay = decay\n self._epsilon = epsilon\n\n def _create_accumulators(self, block, parameters):\n assert isinstance(block, framework.Block)\n\n for p in parameters:\n self._add_accumulator(self._moment_acc_str, p)\n\n def _append_optimize_op(self, block, param_and_grad):\n assert isinstance(block, framework.Block)\n\n moment_acc = self._get_accumulator(self._moment_acc_str,\n param_and_grad[0])\n\n # Create the decayed adagrad optimizer op\n decayed_adagrad_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": param_and_grad[0],\n \"Grad\": param_and_grad[1],\n \"Moment\": moment_acc,\n \"LearningRate\": self._create_param_lr(param_and_grad)\n },\n outputs={\"ParamOut\": param_and_grad[0],\n \"MomentOut\": moment_acc},\n attrs={\"epsilon\": self._epsilon},\n stop_gradient=True)\n\n return decayed_adagrad_op\n\n\nclass AdadeltaOptimizer(Optimizer):\n \"\"\"\n **Adadelta Optimizer**\n\n Simple Adadelta optimizer with average squared grad state and\n average squared update state.\n The details of adadelta please refer to this\n `ADADELTA: AN ADAPTIVE LEARNING RATE METHOD\n 
<http://www.matthewzeiler.com/pubs/googleTR2012/googleTR2012.pdf>`_.\n\n .. math::\n\n E(g_t^2) &= \\\\rho * E(g_{t-1}^2) + (1-\\\\rho) * g^2 \\\\\\\\\n learning\\\\_rate &= sqrt( ( E(dx_{t-1}^2) + \\\\epsilon ) / ( \\\\\n E(g_t^2) + \\\\epsilon ) ) \\\\\\\\\n E(dx_t^2) &= \\\\rho * E(dx_{t-1}^2) + (1-\\\\rho) * (-g*learning\\\\_rate)^2\n\n Args:\n learning_rate(float): global learning rate\n rho(float): rho in equation\n epsilon(float): epsilon in equation\n regularization: A Regularizer, such as\n fluid.regularizer.L2DecayRegularizer.\n name: A optional name prefix.\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n optimizer = fluid.optimizer.Adadelta(\n learning_rate=0.0003, epsilon=1.0e-6, rho=0.95)\n _, params_grads = optimizer.minimize(cost)\n\n Notes:\n Currently, AdadeltaOptimizer doesn't support sparse parameter optimization.\n \"\"\"\n\n _avg_squared_grad_acc_str = \"_avg_squared_grad\"\n _avg_squared_update_acc_str = \"_avg_squared_update\"\n\n def __init__(self,\n learning_rate,\n epsilon=1.0e-6,\n rho=0.95,\n regularization=None,\n name=None):\n if learning_rate is None:\n raise ValueError(\"learning_rate is not set.\")\n if epsilon is None:\n raise ValueError(\"epsilon is not set.\")\n if rho is None:\n raise ValueError(\"rho is not set.\")\n super(AdadeltaOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n name=name)\n self.type = \"adadelta\"\n self._epsilon = epsilon\n self._rho = rho\n\n def _create_accumulators(self, block, parameters):\n if not isinstance(block, framework.Block):\n raise TypeError(\"block is not instance of framework.Block.\")\n\n for p in parameters:\n self._add_accumulator(self._avg_squared_grad_acc_str, p)\n self._add_accumulator(self._avg_squared_update_acc_str, p)\n\n def _append_optimize_op(self, block, param_and_grad):\n if not isinstance(block, framework.Block):\n raise TypeError(\"block is not instance of framework.Block.\")\n\n avg_squared_grad_acc = 
self._get_accumulator(\n self._avg_squared_grad_acc_str, param_and_grad[0])\n avg_squared_update_acc = self._get_accumulator(\n self._avg_squared_update_acc_str, param_and_grad[0])\n\n # Create the adadelta optimizer op\n adadelta_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": param_and_grad[0],\n \"Grad\": param_and_grad[1],\n \"AvgSquaredGrad\": avg_squared_grad_acc,\n \"AvgSquaredUpdate\": avg_squared_update_acc\n },\n outputs={\n \"ParamOut\": param_and_grad[0],\n \"AvgSquaredGradOut\": avg_squared_grad_acc,\n \"AvgSquaredUpdateOut\": avg_squared_update_acc\n },\n attrs={\"epsilon\": self._epsilon,\n \"rho\": self._rho},\n stop_gradient=True)\n\n return adadelta_op\n\n\nclass RMSPropOptimizer(Optimizer):\n \"\"\"\n Root Mean Squared Propagation (RMSProp) is an unpublished, adaptive learning\n rate method. The original slides proposed RMSProp: Slide 29 of\n http://www.cs.toronto.edu/~tijmen/csc321/slides/lecture_slides_lec6.pdf .\n\n The original equation is as follows:\n\n .. math::\n\n r(w, t) & = \\\\rho r(w, t-1) + (1 - \\\\rho)(\\\\nabla Q_{i}(w))^2\n\n w & = w - \\\\frac{\\\\eta} {\\\\sqrt{r(w,t) + \\\\epsilon}} \\\\nabla Q_{i}(w)\n\n The first equation calculates moving average of the squared gradient for\n each weight. Then dividing the gradient by :math:`sqrt{v(w,t)}`.\n\n In some cases, adding a momentum term :math: `\\\\beta` is beneficial.\n In our implementation, Nesterov momentum is used:\n\n .. math::\n\n r(w, t) & = \\\\rho r(w, t-1) + (1 - \\\\rho)(\\\\nabla Q_{i}(w))^2\n\n v(w, t) & = \\\\beta v(w, t-1) + \\\\frac{\\\\eta} {\\\\sqrt{r(w,t) +\n \\\\epsilon}} \\\\nabla Q_{i}(w)\n\n w & = w - v(w, t)\n\n if centered is True:\n\n .. 
math::\n\n r(w, t) & = \\\\rho r(w, t-1) + (1 - \\\\rho)(\\\\nabla Q_{i}(w))^2\n\n g(w, t) & = \\\\rho g(w, t-1) + (1 - \\\\rho)\\\\nabla Q_{i}(w)\n\n v(w, t) & = \\\\beta v(w, t-1) + \\\\frac{\\\\eta} {\\\\sqrt{r(w,t) - (g(w, t))^2 +\n \\\\epsilon}} \\\\nabla Q_{i}(w)\n\n w & = w - v(w, t)\n\n where, :math:`\\\\rho` is a hyperparameter and typical values are 0.9, 0.95\n and so on. :math: `beta` is the momentum term. :math: `\\\\epsilon` is a\n smoothing term to avoid division by zero, usually set somewhere in range\n from 1e-4 to 1e-8.\n\n\n Args:\n learning_rate(float): global learning rate.\n rho(float): rho is :math: `\\\\rho` in equation, set 0.95 by default.\n epsilon(float): :math: `\\\\epsilon` in equation is smoothing term to\n avoid division by zero, set 1e-6 by default.\n momentum(float): :math:`\\\\beta` in equation is the momentum term,\n set 0.0 by default.\n centered(bool): If True, gradients are normalized by the estimated variance of\n the gradient; if False, by the uncentered second moment. Setting this to\n True may help with training, but is slightly more expensive in terms of\n computation and memory. Defaults to False.\n regularization: A Regularizer, such as\n fluid.regularizer.L2DecayRegularizer.\n name: A optional name prefix.\n\n Raises:\n ValueError: If learning_rate, rho, epsilon, momentum are None.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n import numpy as np\n\n place = fluid.CPUPlace()\n main = fluid.Program()\n with fluid.program_guard(main):\n x = fluid.layers.data(name='x', shape=[13], dtype='float32')\n y = fluid.layers.data(name='y', shape=[1], dtype='float32')\n y_predict = fluid.layers.fc(input=x, size=1, act=None)\n cost = fluid.layers.square_error_cost(input=y_predict, label=y)\n avg_cost = fluid.layers.mean(cost)\n\n rms_optimizer = fluid.optimizer.RMSProp(learning_rate=0.1)\n rms_optimizer.minimize(avg_cost)\n\n fetch_list = [avg_cost]\n train_reader = paddle.batch(\n paddle.dataset.uci_housing.train(), batch_size=1)\n feeder = fluid.DataFeeder(place=place, feed_list=[x, y])\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for data in train_reader():\n exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)\n\n \"\"\"\n\n _momentum_acc_str = \"momentum\"\n _mean_square_acc_str = \"mean_square\"\n _mean_grad_acc_str = \"mean_grad\"\n\n def __init__(self,\n learning_rate,\n rho=0.95,\n epsilon=1.0e-6,\n momentum=0.0,\n centered=False,\n regularization=None,\n name=None):\n super(RMSPropOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n name=name)\n if learning_rate is None:\n raise ValueError(\"learning_rate is not set.\")\n if rho is None:\n raise ValueError(\"rho is not set.\")\n if epsilon is None:\n raise ValueError(\"epsilon is not set.\")\n if momentum is None:\n raise ValueError(\"momentum is not set.\")\n\n self.type = \"rmsprop\"\n self._rho = rho\n self._epsilon = epsilon\n self._momentum = momentum\n self._centered = centered\n\n def _create_accumulators(self, block, parameters):\n if not isinstance(block, framework.Block):\n raise TypeError(\"block is not instance of framework.Block.\")\n\n for p in parameters:\n self._add_accumulator(self._momentum_acc_str, p)\n self._add_accumulator(self._mean_square_acc_str, p)\n 
self._add_accumulator(self._mean_grad_acc_str, p)\n\n def _append_optimize_op(self, block, param_and_grad):\n if not isinstance(block, framework.Block):\n raise TypeError(\"block is not instance of framework.Block.\")\n\n momentum_acc = self._get_accumulator(self._momentum_acc_str,\n param_and_grad[0])\n mean_square_acc = self._get_accumulator(self._mean_square_acc_str,\n param_and_grad[0])\n mean_grad_acc = self._get_accumulator(self._mean_grad_acc_str,\n param_and_grad[0])\n rmsprop_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": param_and_grad[0],\n \"Grad\": param_and_grad[1],\n \"Moment\": momentum_acc,\n \"MeanSquare\": mean_square_acc,\n \"MeanGrad\": mean_grad_acc,\n \"LearningRate\": self._create_param_lr(param_and_grad),\n },\n outputs={\n \"ParamOut\": param_and_grad[0],\n \"MomentOut\": momentum_acc,\n \"MeanSquareOut\": mean_square_acc,\n \"MeanGradOut\": mean_grad_acc\n },\n attrs={\n \"epsilon\": self._epsilon,\n \"decay\": self._rho,\n \"momentum\": self._momentum,\n \"centered\": self._centered\n },\n stop_gradient=True)\n\n return rmsprop_op\n\n\nclass FtrlOptimizer(Optimizer):\n \"\"\"\n FTRL (Follow The Regularized Leader) Optimizer.\n\n The paper that proposed Follow The Regularized Leader (FTRL):\n (https://www.eecs.tufts.edu/~dsculley/papers/ad-click-prediction.pdf)\n\n .. 
math::\n\n &new\\_accum = squared\\_accum + grad^2\n\n &if (lr\\_power == -0.5):\n\n &\\quad linear\\_accum += grad - \\\\frac{\\\\sqrt{new\\_accum} - \\\\sqrt{squared\\_accum}}{learning\\_rate * param}\n\n &else:\n\n &\\quad linear\\_accum += grad - \\\\frac{new\\_accum^{-lr\\_power} - accum^{-lr\\_power}}{learning\\_rate * param}\n\n\n &x = l1 * sign(linear\\_accum) - linear\\_accum\n\n &if (lr\\_power == -0.5):\n\n &\\quad y = \\\\frac{\\\\sqrt{new\\_accum}}{learning\\_rate} + (2 * l2)\n\n &\\quad pre\\_shrink = \\\\frac{x}{y}\n\n &\\quad param = (abs(linear\\_accum) > l1).select(pre\\_shrink, 0.0)\n\n &else:\n\n &\\quad y = \\\\frac{new\\_accum^{-lr\\_power}}{learning\\_rate} + (2 * l2)\n\n &\\quad pre\\_shrink = \\\\frac{x}{y}\n\n &\\quad param = (abs(linear\\_accum) > l1).select(pre\\_shrink, 0.0)\n\n &squared\\_accum += grad^2\n\n Args:\n learning_rate (float|Variable): global learning rate.\n l1 (float): L1 regularization strength.\n l2 (float): L2 regularization strength.\n lr_power (float): Learning Rate Power.\n regularization: A Regularizer, such as\n fluid.regularizer.L2DecayRegularizer.\n name: A optional name prefix.\n\n Raises:\n ValueError: If learning_rate, rho, epsilon, momentum are None.\n\n Examples:\n .. 
code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n import numpy as np\n\n place = fluid.CPUPlace()\n main = fluid.Program()\n with fluid.program_guard(main):\n x = fluid.layers.data(name='x', shape=[13], dtype='float32')\n y = fluid.layers.data(name='y', shape=[1], dtype='float32')\n y_predict = fluid.layers.fc(input=x, size=1, act=None)\n cost = fluid.layers.square_error_cost(input=y_predict, label=y)\n avg_cost = fluid.layers.mean(cost)\n\n ftrl_optimizer = fluid.optimizer.Ftrl(learning_rate=0.1)\n ftrl_optimizer.minimize(avg_cost)\n\n fetch_list = [avg_cost]\n train_reader = paddle.batch(\n paddle.dataset.uci_housing.train(), batch_size=1)\n feeder = fluid.DataFeeder(place=place, feed_list=[x, y])\n exe = fluid.Executor(place)\n exe.run(fluid.default_startup_program())\n for data in train_reader():\n exe.run(main, feed=feeder.feed(data), fetch_list=fetch_list)\n\n Notes:\n Currently, FtrlOptimizer doesn't support sparse parameter optimization.\n \"\"\"\n\n _squared_acc_str = \"squared\"\n _linear_acc_str = \"linear\"\n\n def __init__(self,\n learning_rate,\n l1=0.0,\n l2=0.0,\n lr_power=-0.5,\n regularization=None,\n name=None):\n super(FtrlOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n name=name)\n if learning_rate is None:\n raise ValueError(\"learning_rate is not set.\")\n\n self.type = \"ftrl\"\n self._l1 = l1\n self._l2 = l2\n self._lr_power = lr_power\n\n def _create_accumulators(self, block, parameters):\n if not isinstance(block, framework.Block):\n raise TypeError(\"block is not instance of framework.Block.\")\n\n for p in parameters:\n self._add_accumulator(self._squared_acc_str, p)\n self._add_accumulator(self._linear_acc_str, p)\n\n def _append_optimize_op(self, block, param_and_grad):\n if not isinstance(block, framework.Block):\n raise TypeError(\"block is not instance of framework.Block.\")\n\n squared_acc = self._get_accumulator(self._squared_acc_str,\n param_and_grad[0])\n 
linear_acc = self._get_accumulator(self._linear_acc_str,\n param_and_grad[0])\n ftrl_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": param_and_grad[0],\n \"Grad\": param_and_grad[1],\n \"SquaredAccumulator\": squared_acc,\n \"LinearAccumulator\": linear_acc,\n \"LearningRate\": self._create_param_lr(param_and_grad),\n },\n outputs={\n \"ParamOut\": param_and_grad[0],\n \"SquaredAccumOut\": squared_acc,\n \"LinearAccumOut\": linear_acc\n },\n attrs={\"l1\": self._l1,\n \"l2\": self._l1,\n \"lr_power\": self._lr_power},\n stop_gradient=True)\n\n return ftrl_op\n\n\nclass LambOptimizer(AdamOptimizer):\n \"\"\"\n LAMB (Layer-wise Adaptive Moments optimizer for Batching training) Optimizer.\n\n LAMB Optimizer is designed to scale up the batch size of training without losing \n accuracy, which supports adaptive element-wise updating and accurate layer-wise \n correction. For more information, please refer to `Large Batch Optimization for \n Deep Learning: Training BERT in 76 minutes <https://arxiv.org/abs/1904.00962>`_ .\n\n The updating of parameters follows:\n\n .. math::\n\n m_t &= \\\\beta_1 m_{t - 1}+ (1 - \\\\beta_1)g_t \\\\\n\n v_t &= \\\\beta_2 v_{t - 1} + (1 - \\\\beta_2)g_t^2 \\\\\n\n r_t &= \\\\frac{m_t}{\\\\sqrt{v_t}+\\\\epsilon} \\\\\n\n w_t &= w_{t-1} -\\\\eta_t \\\\frac{\\\\left \\| w_{t-1}\\\\right \\|}{\\\\left \\| r_t + \\\\lambda w_{t-1}\\\\right \\|} (r_t + \\\\lambda w_{t-1})\n\n\n where :math:`m` is the 1st moment, and :math:`v` the 2nd moment, :math:`\\\\eta` the \n learning rate, :math:`\\\\lambda` the LAMB weight decay rate.\n\n Args:\n learning_rate (float|Variable): the learning rate used to update parameters. 
\\\n Can be a float value or a Variable with one \\\n float value as data element.\n lamb_weight_decay (float): The LAMB weight decay rate.\n beta1 (float): The exponential decay rate for the 1st moment estimates.\n beta2 (float): The exponential decay rate for the 2nd moment estimates.\n epsilon (float): A small float value for numerical stability.\n regularization (Regularizer): A Regularizer, such as\n fluid.regularizer.L1DecayRegularizer.\n exclude_from_weight_decay_fn (function): Exclude a parameter from weight \n decay when **exclude_from_weight_decay_fn(parameter)** returns true.\n name (str|None): An optional name prefix.\n\n Examples:\n .. code-block:: python\n \n import paddle.fluid as fluid \n\n data = fluid.layers.data(name='x', shape=[5], dtype='float32')\n hidden = fluid.layers.fc(input=data, size=10)\n cost = fluid.layers.mean(hidden)\n\n def exclude_fn(param):\n return param.name.endswith('.b_0')\n\n optimizer = fluid.optimizer.Lamb(learning_rate=0.002,\n exclude_from_weight_decay_fn=exclude_fn)\n optimizer.minimize(cost)\n \"\"\"\n _moment1_acc_str = \"moment1\"\n _moment2_acc_str = \"moment2\"\n # these two not used in op temporarily\n _beta1_pow_acc_str = \"beta1_pow_acc\"\n _beta2_pow_acc_str = \"beta2_pow_acc\"\n\n def __init__(self,\n learning_rate=0.001,\n lamb_weight_decay=0.01,\n beta1=0.9,\n beta2=0.999,\n epsilon=1e-6,\n regularization=None,\n exclude_from_weight_decay_fn=None,\n name=None):\n assert learning_rate is not None\n assert lamb_weight_decay is not None\n assert beta1 is not None\n assert beta2 is not None\n assert epsilon is not None\n super(LambOptimizer, self).__init__(\n learning_rate=learning_rate,\n regularization=regularization,\n beta1=beta1,\n beta2=beta2,\n epsilon=epsilon,\n name=name)\n self.type = \"lamb\"\n self._weight_decay = lamb_weight_decay\n self._exclude_from_weight_decay_fn = exclude_from_weight_decay_fn\n\n def _append_optimize_op(self, block, param_and_grad):\n assert isinstance(block, framework.Block)\n 
block.program._use_lamb = True\n\n moment1 = self._get_accumulator(self._moment1_acc_str,\n param_and_grad[0])\n moment2 = self._get_accumulator(self._moment2_acc_str,\n param_and_grad[0])\n beta1_pow_acc = self._get_accumulator(self._beta1_pow_acc_str,\n param_and_grad[0])\n beta2_pow_acc = self._get_accumulator(self._beta2_pow_acc_str,\n param_and_grad[0])\n\n if self._exclude_from_weight_decay_fn is not None \\\n and self._exclude_from_weight_decay_fn(param_and_grad[0]):\n weight_decay = 0.0\n else:\n weight_decay = self._weight_decay\n\n # create the lamb optimize op\n lamb_op = block.append_op(\n type=self.type,\n inputs={\n \"Param\": param_and_grad[0],\n \"Grad\": param_and_grad[1],\n \"LearningRate\": self._create_param_lr(param_and_grad),\n \"Moment1\": moment1,\n \"Moment2\": moment2,\n \"Beta1Pow\": beta1_pow_acc,\n \"Beta2Pow\": beta2_pow_acc\n },\n outputs={\n \"ParamOut\": param_and_grad[0],\n \"Moment1Out\": moment1,\n \"Moment2Out\": moment2\n },\n attrs={\n \"beta1\": self._beta1,\n \"beta2\": self._beta2,\n \"epsilon\": self._epsilon,\n \"weight_decay\": weight_decay\n },\n stop_gradient=True)\n\n return lamb_op\n\n\n# We short the class name, since users will use the optimizer with the package\n# name. The sample code:\n#\n# import paddle.fluid as fluid\n#\n# sgd = fluid.optimizer.SGD(...)\n#\n# It is no need to add an `Optimizer` as the class suffix\nSGD = SGDOptimizer\nMomentum = MomentumOptimizer\nAdagrad = AdagradOptimizer\nAdam = AdamOptimizer\nAdamax = AdamaxOptimizer\nDecayedAdagrad = DecayedAdagradOptimizer\nAdadelta = AdadeltaOptimizer\nRMSProp = RMSPropOptimizer\nFtrl = FtrlOptimizer\nLarsMomentum = LarsMomentumOptimizer\nLamb = LambOptimizer\n\n\nclass ModelAverage(Optimizer):\n \"\"\"Accumulate the average of parameters within sliding window. The average\n result will be saved in temporary variables which can be applied to\n parameter variables of current model by calling 'apply()' method. 
And the\n 'restore()' method is used to restore the parameter values of current model.\n\n The size of average window is determined by average_window_rate,\n min_average_window, max_average_window and current update times.\n\n Args:\n average_window_rate: The rate of average window.\n min_average_window: The minimum size of average window.\n max_average_window: The maximum size of average window.\n regularization: A Regularizer, such as\n fluid.regularizer.L2DecayRegularizer.\n name: A optional name prefix.\n\n Examples:\n\n .. code-block:: python\n\n import paddle.fluid as fluid\n import numpy\n\n # First create the Executor.\n place = fluid.CPUPlace() # fluid.CUDAPlace(0)\n exe = fluid.Executor(place)\n\n train_program = fluid.Program()\n startup_program = fluid.Program()\n with fluid.program_guard(train_program, startup_program):\n # build net\n data = fluid.layers.data(name='X', shape=[1], dtype='float32')\n hidden = fluid.layers.fc(input=data, size=10)\n loss = fluid.layers.mean(hidden)\n optimizer = fluid.optimizer.Momentum(learning_rate=0.2, momentum=0.1)\n optimizer.minimize(loss)\n\n # build ModelAverage optimizer\n model_average = fluid.optimizer.ModelAverage(0.15,\n min_average_window=10000,\n max_average_window=20000)\n\n exe.run(startup_program)\n x = numpy.random.random(size=(10, 1)).astype('float32')\n outs = exe.run(program=train_program,\n feed={'X': x},\n fetch_list=[loss.name])\n\n # apply ModelAverage\n with model_average.apply(exe):\n x = numpy.random.random(size=(10, 1)).astype('float32')\n exe.run(program=train_program,\n feed={'X': x},\n fetch_list=[loss.name])\n \"\"\"\n\n def __init__(self,\n average_window_rate,\n min_average_window=10000,\n max_average_window=10000,\n regularization=None,\n name=None):\n super(ModelAverage, self).__init__(\n 0.0, regularization=regularization, name=name)\n self.average_window = average_window_rate\n self.min_average_window = min_average_window\n self.max_average_window = max_average_window\n\n 
self.params_grads = []\n for param in framework.default_main_program().global_block(\n ).all_parameters():\n if param.do_model_average != False:\n grad = param.block.create_var(\n name=unique_name.generate_with_ignorable_key(\".\".join(\n [param.name, 'tmp'])),\n dtype=param.dtype,\n persistable=False,\n stop_gradient=True)\n self.params_grads.append((param, grad))\n\n for param, grad in self.params_grads:\n if grad is None:\n continue\n with param.block.program._optimized_guard(\n [param, grad]), name_scope('move_average'):\n self._append_average_accumulate_op(param)\n\n self.apply_program = Program()\n block = self.apply_program.global_block()\n with program_guard(main_program=self.apply_program):\n for param_grad in self.params_grads:\n self._add_average_apply_op(block, param_grad)\n\n self.restore_program = Program()\n block = self.restore_program.global_block()\n with program_guard(main_program=self.restore_program):\n for param_grad in self.params_grads:\n self._add_average_restore_op(block, param_grad)\n\n def _add_average_apply_op(self, block, param_grad):\n param = block._clone_variable(param_grad[0])\n grad = block._clone_variable(param_grad[1])\n sum_1 = block._clone_variable(self._get_accumulator('sum_1', param))\n sum_2 = block._clone_variable(self._get_accumulator('sum_2', param))\n sum_3 = block._clone_variable(self._get_accumulator('sum_3', param))\n num_accumulates = block._clone_variable(\n self._get_accumulator('num_accumulates', param))\n old_num_accumulates = block._clone_variable(\n self._get_accumulator('old_num_accumulates', param))\n num_updates = block._clone_variable(\n self._get_accumulator('num_updates', param))\n # backup param value to grad\n layers.assign(input=param, output=grad)\n # param = (sum_1 + sum_2 + sum_3) / (num_accumulates + old_num_accumulates)\n tmp = layers.sum(x=[num_accumulates, old_num_accumulates])\n sum = layers.sum(x=[sum_1, sum_2, sum_3])\n tmp = layers.cast(\n x=tmp, dtype='float32' if self._dtype == None else 
self._dtype)\n sum = layers.cast(\n x=sum, dtype='float32' if self._dtype == None else self._dtype)\n ops._elementwise_div(x=sum, y=tmp, out=param)\n\n def _add_average_restore_op(self, block, param_grad):\n param = block._clone_variable(param_grad[0])\n grad = block._clone_variable(param_grad[1])\n layers.assign(input=grad, output=param)\n\n def _append_average_accumulate_op(self, param):\n self.helper = LayerHelper(\"average_accumulate\")\n sum_1 = self._add_accumulator('sum_1', param)\n sum_2 = self._add_accumulator('sum_2', param)\n sum_3 = self._add_accumulator('sum_3', param)\n num_accumulates = self._add_accumulator(\n 'num_accumulates', param, dtype='int64', shape=[1])\n old_num_accumulates = self._add_accumulator(\n 'old_num_accumulates', param, dtype='int64', shape=[1])\n num_updates = self._add_accumulator(\n 'num_updates', param, dtype='int64', shape=[1])\n\n self.helper.append_op(\n type='average_accumulates',\n inputs={\n \"param\": param,\n \"in_sum_1\": sum_1,\n \"in_sum_2\": sum_2,\n \"in_sum_3\": sum_3,\n \"in_num_accumulates\": num_accumulates,\n \"in_old_num_accumulates\": old_num_accumulates,\n \"in_num_updates\": num_updates\n },\n outputs={\n \"out_sum_1\": sum_1,\n \"out_sum_2\": sum_2,\n \"out_sum_3\": sum_3,\n \"out_num_accumulates\": num_accumulates,\n \"out_old_num_accumulates\": old_num_accumulates,\n \"out_num_updates\": num_updates,\n },\n attrs={\n \"average_window\": self.average_window,\n \"min_average_window\": self.min_average_window,\n \"max_average_window\": self.max_average_window,\n },\n stop_gradient=True)\n\n @signature_safe_contextmanager\n def apply(self, executor, need_restore=True):\n \"\"\"Apply average values to parameters of current model.\n\n Args:\n executor(fluid.Executor): current executor.\n need_restore(bool): If you finally need to do restore, set it to True. 
Default is True.\n \"\"\"\n executor.run(self.apply_program)\n try:\n yield\n finally:\n if need_restore:\n self.restore(executor)\n\n def restore(self, executor):\n \"\"\"Restore parameter values of current model.\n \n Args:\n executor(fluid.Executor): current executor.\n \"\"\"\n executor.run(self.restore_program)\n\n\nclass ExponentialMovingAverage(object):\n \"\"\"\n Compute the moving average of parameters with exponential decay.\n Given a parameter :math:`\\\\theta`, its exponential moving average (EMA)\n will be\n\n .. math::\n\n \\\\text{EMA}_0 & = 0\n\n\t\\\\text{EMA}_t & = \\\\text{decay} * \\\\text{EMA}_{t-1} + (1 - \\\\text{decay}) * \\\\theta_t\n\n The average results calculated by **update()** method will be saved in \n temporary variables which are created and maintained by the object, and can \n be applied to parameters of current model by calling **apply()** method. And \n the **restore()** method is used to restore the parameters.\n\n **Bias correction**. All EMAs are initialized to :math:`0` and hence they will be \n zero biased, which can be corrected by divided by a factor \n :math:`(1 - \\\\text{decay}^t)` , i.e., the actual EMAs applied to parameters \n when calling **apply()** method would be \n\n .. math::\n \n \\\\widehat{\\\\text{EMA}}_t = \\\\frac{\\\\text{EMA}_t}{1 - \\\\text{decay}^t}\n\n **Decay rate scheduling**. A large decay rate very close to 1 would result \n in that the averages move very slowly. And a better strategy is to set a \n relative smaller decay rate in the very beginning. The argument **thres_steps**\n allows users to pass a Variable to schedule the decay rate, in this case, \n the actual decay rate becomes\n \n .. math::\n \n \\\\min(\\\\text{decay}, \\\\frac{1 + \\\\text{thres_steps}}{10 + \\\\text{thres_steps}})\n\n Usually **thres_steps** can be the global training steps.\n\n\n Args:\n\tdecay (float): The exponential decay rate, usually close to 1, such as \n 0.999, 0.9999, ... 
.\n thres_steps (Variable|None): If not `None`, schedule the decay rate.\n\tname (str|None): An optional name prefix.\n\n\n Examples:\n\n\t.. code-block:: python\n\n\t import numpy\n\t import paddle\n\t import paddle.fluid as fluid\n\n\t data = fluid.layers.data(name='x', shape=[5], dtype='float32')\n\t hidden = fluid.layers.fc(input=data, size=10)\n\t cost = fluid.layers.mean(hidden)\n\n\t test_program = fluid.default_main_program().clone(for_test=True)\n\n\t optimizer = fluid.optimizer.Adam(learning_rate=0.001)\n\t optimizer.minimize(cost)\n\n\t global_steps = fluid.layers.learning_rate_scheduler._decay_step_counter()\n\t ema = fluid.optimizer.ExponentialMovingAverage(0.999, thres_steps=global_steps)\n\t ema.update()\n\n\t place = fluid.CPUPlace()\n\t exe = fluid.Executor(place)\n\t exe.run(fluid.default_startup_program())\n\n\t for pass_id in range(3):\n\t\tfor batch_id in range(6):\n\t\t data = numpy.random.random(size=(10, 5)).astype('float32')\n\t\t exe.run(program=fluid.default_main_program(),\n\t\t\tfeed={'x': data}, \n\t\t\tfetch_list=[cost.name])\n\n\t\t# usage 1\n\t\twith ema.apply(exe):\n\t\t data = numpy.random.random(size=(10, 5)).astype('float32')\n\t\t exe.run(program=test_program,\n\t\t\t feed={'x': data}, \n\t\t\t fetch_list=[hidden.name])\n\t\t\t \n\n\t\t # usage 2\n\t\twith ema.apply(exe, need_restore=False):\n\t\t data = numpy.random.random(size=(10, 5)).astype('float32')\n\t\t exe.run(program=test_program,\n\t\t\t feed={'x': data}, \n\t\t\t fetch_list=[hidden.name])\n\t\tema.restore(exe)\n \"\"\"\n\n def __init__(self, decay=0.999, thres_steps=None, name=None):\n self._decay = decay\n self._thres_steps = thres_steps\n self._name = name if name is not None else ''\n self._decay_var = self._get_ema_decay()\n\n self._params_tmps = []\n for param in default_main_program().global_block().all_parameters():\n if param.do_model_average != False:\n tmp = param.block.create_var(\n name=unique_name.generate(\".\".join(\n [self._name + param.name, 
'ema_tmp'])),\n dtype=param.dtype,\n persistable=False,\n stop_gradient=True)\n self._params_tmps.append((param, tmp))\n\n self._ema_vars = {}\n for param, tmp in self._params_tmps:\n with param.block.program._optimized_guard(\n [param, tmp]), name_scope('moving_average'):\n self._ema_vars[param.name] = self._create_ema_vars(param)\n\n self.apply_program = Program()\n block = self.apply_program.global_block()\n with program_guard(main_program=self.apply_program):\n decay_pow = self._get_decay_pow(block)\n for param, tmp in self._params_tmps:\n param = block._clone_variable(param)\n tmp = block._clone_variable(tmp)\n ema = block._clone_variable(self._ema_vars[param.name])\n layers.assign(input=param, output=tmp)\n # bias correction\n ema = ema / (1.0 - decay_pow)\n layers.assign(input=ema, output=param)\n\n self.restore_program = Program()\n block = self.restore_program.global_block()\n with program_guard(main_program=self.restore_program):\n for param, tmp in self._params_tmps:\n tmp = block._clone_variable(tmp)\n param = block._clone_variable(param)\n layers.assign(input=tmp, output=param)\n\n def _get_ema_decay(self):\n with default_main_program()._lr_schedule_guard():\n decay_var = layers.tensor.create_global_var(\n shape=[1],\n value=self._decay,\n dtype='float32',\n persistable=True,\n name=\"scheduled_ema_decay_rate\")\n\n if self._thres_steps is not None:\n decay_t = (self._thres_steps + 1.0) / (self._thres_steps + 10.0)\n with layers.control_flow.Switch() as switch:\n with switch.case(decay_t < self._decay):\n layers.tensor.assign(decay_t, decay_var)\n with switch.default():\n layers.tensor.assign(\n np.array(\n [self._decay], dtype=np.float32),\n decay_var)\n return decay_var\n\n def _get_decay_pow(self, block):\n global_steps = layers.learning_rate_scheduler._decay_step_counter()\n decay_var = block._clone_variable(self._decay_var)\n decay_pow_acc = layers.elementwise_pow(decay_var, global_steps + 1)\n return decay_pow_acc\n\n def _create_ema_vars(self, 
param):\n param_ema = layers.create_global_var(\n name=unique_name.generate(self._name + param.name + '_ema'),\n shape=param.shape,\n value=0.0,\n dtype=param.dtype,\n persistable=True)\n\n return param_ema\n\n def update(self):\n \"\"\" \n Update Exponential Moving Average. Should only call this method in \n train program.\n \"\"\"\n param_master_emas = []\n for param, tmp in self._params_tmps:\n with param.block.program._optimized_guard(\n [param, tmp]), name_scope('moving_average'):\n param_ema = self._ema_vars[param.name]\n if param.name + '.master' in self._ema_vars:\n master_ema = self._ema_vars[param.name + '.master']\n param_master_emas.append([param_ema, master_ema])\n else:\n ema_t = param_ema * self._decay_var + param * (\n 1 - self._decay_var)\n layers.assign(input=ema_t, output=param_ema)\n\n # for fp16 params\n for param_ema, master_ema in param_master_emas:\n default_main_program().global_block().append_op(\n type=\"cast\",\n inputs={\"X\": master_ema},\n outputs={\"Out\": param_ema},\n attrs={\n \"in_dtype\": master_ema.dtype,\n \"out_dtype\": param_ema.dtype\n })\n\n @signature_safe_contextmanager\n def apply(self, executor, need_restore=True):\n \"\"\"\n Apply moving average to parameters for evaluation.\n \n Args:\n executor (Executor): The Executor to execute applying.\n need_restore (bool): Whether to restore parameters after applying.\n \"\"\"\n executor.run(self.apply_program)\n try:\n yield\n finally:\n if need_restore:\n self.restore(executor)\n\n def restore(self, executor):\n \"\"\"Restore parameters.\n \n Args:\n executor (Executor): The Executor to execute restoring.\n \"\"\"\n executor.run(self.restore_program)\n\n\nclass PipelineOptimizer(object):\n \"\"\"\n Pipeline Optimizer\n\n Train with pipeline mode. The program will be splited by cut_list. \n\n If the len of cut_list is k, then the whole program (including \\\n backward part) will be splited to 2*k-1 sections. 
\n \n So the length of place_list and concurrency_list must be also 2*k-1.\n\n Note: Though the asynchronous mode is applied in pipeline training to speed up, \\\n the final performance depends on the training progress of each pipeline heavily.\n\n And we will try the synchronous mode in the future.\n\n Args:\n optimizer (Optimizer): The based optimizer, such as SGD.\n cut_list (list of Variable list): The cut variable of the main_program.\n place_list (list of Place): The place where the section will run on.\n concurrency_list (list of int): The concurrency degree.\n queue_size (int): Each section will consume scopes from its in-scope queue \n and produce scopes to out-scope queue. And this parameter \n specify the scope queue size. [Optional. Default: 30].\n sync_steps (int): The synchronization steps between different cards. [Optional. Default: 1].\n start_cpu_core_id (int): specify the first cpu core id. [Optional. Default:0].\n\n Examples:\n .. code-block:: python\n\n import paddle.fluid as fluid\n import paddle.fluid.layers as layers\n\n x = fluid.layers.data(name='x', shape=[1], dtype='int64', lod_level=0)\n y = fluid.layers.data(name='y', shape=[1], dtype='int64', lod_level=0)\n emb_x = layers.embedding(input=x, param_attr=fluid.ParamAttr(name=\"embx\"), size=[10,2], is_sparse=False)\n emb_y = layers.embedding(input=y, param_attr=fluid.ParamAttr(name=\"emby\",learning_rate=0.9), size=[10,2], is_sparse=False)\n concat = layers.concat([emb_x, emb_y], axis=1)\n fc = layers.fc(input=concat, name=\"fc\", size=1, num_flatten_dims=1, bias_attr=False)\n loss = layers.reduce_mean(fc)\n optimizer = fluid.optimizer.SGD(learning_rate=0.5)\n optimizer = fluid.optimizer.PipelineOptimizer(optimizer,\n cut_list=[[emb_x, emb_y], [loss]],\n place_list=[fluid.CPUPlace(), fluid.CUDAPlace(0), fluid.CPUPlace()],\n concurrency_list=[1, 1, 4],\n queue_size=2,\n sync_steps=1,\n )\n optimizer.minimize(loss)\n place = fluid.CPUPlace()\n exe = fluid.Executor(place)\n 
exe.run(fluid.default_startup_program())\n filelist = [] # you should set your own filelist, e.g. filelist = [\"dataA.txt\"]\n dataset = fluid.DatasetFactory().create_dataset(\"FileInstantDataset\")\n dataset.set_use_var([x,y])\n dataset.set_batch_size(batch_size)\n dataset.set_filelist(filelist)\n exe.train_from_dataset(\n fluid.default_main_program(),\n dataset,\n thread=2,\n debug=False,\n fetch_list=[],\n fetch_info=[],\n print_period=1)\n \"\"\"\n\n def __init__(self,\n optimizer,\n cut_list=None,\n place_list=None,\n concurrency_list=None,\n queue_size=30,\n sync_steps=1,\n start_cpu_core_id=0):\n # TODO: check properties\n self._optimizer = optimizer\n self._cut_list = cut_list\n self._place_list = place_list\n self._concurrency_list = concurrency_list\n self._queue_size = queue_size\n self._sync_steps = sync_steps\n self._start_cpu_core_id = start_cpu_core_id\n\n def _create_vars(self, block, main_program):\n used_var_set = set()\n for op_idx in range(block.desc.op_size()):\n op_desc = block.desc.op(op_idx)\n vars = op_desc.input_arg_names() + op_desc.output_arg_names()\n for var in vars:\n if var in used_var_set:\n continue\n used_var_set.add(var)\n source_var = main_program.block(0).var(str(var))\n block._clone_variable(source_var, False)\n\n def _extract_section_opt_ops(self, ops, cut_point_name):\n \"\"\"\n Extract opt ops in the given section\n \"\"\"\n output_names = set(cut_point_name)\n relevant_op_flags = [True] * len(ops)\n for i, op in reversed(list(enumerate(ops))):\n if _some_in_set_(op.desc.output_arg_names(), output_names):\n for name in op.desc.input_arg_names():\n output_names.add(name)\n else:\n relevant_op_flags[i] = False\n\n op_path = [ops[i] for i in range(len(ops)) if relevant_op_flags[i]]\n return op_path\n\n def _find_input_output(self, ops, name, is_forward=True):\n \"\"\"\n Find the inputs or outputs of a section\n \"\"\"\n all_set = set()\n part_set = set()\n for op in ops:\n if is_forward:\n 
part_set.update(op.desc.output_arg_names())\n else:\n part_set.update(op.desc.input_arg_names())\n all_set.update(op.desc.output_arg_names())\n all_set.update(op.desc.input_arg_names())\n return all_set - part_set\n\n def _find_persistable_vars(self, ops, whole_parameters):\n \"\"\"\n find the persistable input vars in current section\n \"\"\"\n res = set()\n for op in ops:\n vars = op.desc.input_arg_names()\n for var in vars:\n if var in whole_parameters:\n res.add(var)\n return res\n\n def _is_opt_role_op(self, op):\n op_maker = core.op_proto_and_checker_maker\n optimize_role = core.op_proto_and_checker_maker.OpRole.Optimize\n if op_maker.kOpRoleAttrName() in op.attr_names and \\\n int(op.all_attrs()[op_maker.kOpRoleAttrName()]) & int(optimize_role) != 0:\n return True\n return False\n\n def _is_lr_role_op(self, op):\n op_maker = core.op_proto_and_checker_maker\n optimize_role = core.op_proto_and_checker_maker.OpRole.LRSched\n if op_maker.kOpRoleAttrName() in op.attr_names and \\\n int(op.all_attrs()[op_maker.kOpRoleAttrName()]) == int(optimize_role):\n return True\n return False\n\n def _extract_section_ops(self, ops, cut_point_name):\n \"\"\"\n Extract ops in the given section \n \"\"\"\n output_names = set(cut_point_name)\n relevant_op_flags = [True] * len(ops)\n for i, op in reversed(list(enumerate(ops))):\n if not self._is_opt_role_op(op) and _some_in_set_(\n op.desc.output_arg_names(), output_names):\n for name in op.desc.input_arg_names():\n output_names.add(name)\n elif op.desc.type() == \"print\" and op.desc.input_arg_names()[\n 0] in output_names:\n continue\n else:\n relevant_op_flags[i] = False\n\n op_path = [ops[i] for i in range(len(ops)) if relevant_op_flags[i]]\n return op_path\n\n def _find_section_opt(self, ops, params):\n res = self._extract_section_opt_ops(ops, params)\n return res\n\n def _split_program(self, main_program, cut_list):\n programs = []\n block = main_program.block(0)\n whole_parameters = [e.name for e in 
block.all_parameters()]\n cut_var_names = []\n cut_len = len(cut_list)\n sec_params = []\n for i, cut_vars in enumerate(cut_list[:-1]):\n cut_var_names.append([cut_var.name for cut_var in cut_vars])\n for i, cut_vars in reversed(list(enumerate(cut_list[:-1]))):\n cut_var_names.append(\n [_append_grad_suffix_(cut_var.name) for cut_var in cut_vars])\n if i == 0:\n cut_var_names[-1] += [var.name for var in cut_list[-1]]\n ops = block.ops[:]\n for i, cut_vars in enumerate(cut_var_names):\n program = {\n \"program\": Program(),\n \"input_set\": set(),\n \"output_set\": set()\n }\n cur_ops = self._extract_section_ops(ops, cut_vars)\n if i == 0:\n for op in ops:\n if self._is_lr_role_op(op):\n cur_ops.append(op)\n #prevent inplace in/out\n program[\"input_set\"].update(\n self._find_input_output(\n cur_ops, [], is_forward=True))\n for e in cur_ops:\n ops.remove(e)\n\n if i < cut_len:\n sec_params.append(\n self._find_persistable_vars(cur_ops, whole_parameters))\n if i >= cut_len - 1:\n opt_ops = self._find_section_opt(\n ops, sec_params[2 * cut_len - 2 - i])\n\n for e in opt_ops:\n ops.remove(e)\n cur_ops += opt_ops\n\n op_descs = [op.desc for op in cur_ops]\n for op_desc in op_descs:\n ap_op = program[\"program\"].block(0).desc.append_op()\n ap_op.copy_from(op_desc)\n program[\"input_set\"].update(\n self._find_input_output(\n cur_ops, cut_vars, is_forward=True))\n program[\"input_set\"].update(sec_params[min(i, 2 * cut_len - 2 - i)])\n program[\"output_set\"].update(\n self._find_input_output(\n cur_ops, cut_vars, is_forward=False))\n programs.append(program)\n program = {\n \"program\": Program(),\n \"input_set\": set(),\n \"output_set\": set()\n }\n op_descs = [op.desc for op in ops]\n for op_desc in op_descs:\n ap_op = program[\"program\"].block(0).desc.append_op()\n ap_op.copy_from(op_desc)\n program[\"input_set\"].update(\n [cut_var.name + \"@GRAD\" for cut_var in cut_list[0]])\n program[\"input_set\"].update(\n self._find_input_output(\n ops, [], 
is_forward=True))\n program[\"input_set\"].update(sec_params[0])\n programs.append(program)\n inputs = set()\n for program in reversed(list(programs)):\n output_list = list(program[\"output_set\"])\n for output in output_list:\n if output not in inputs:\n program[\"output_set\"].remove(output)\n inputs.update(program[\"input_set\"])\n return programs\n\n def minimize(self,\n loss,\n startup_program=None,\n parameter_list=None,\n no_grad_set=None):\n self._optimizer.minimize(loss, startup_program, parameter_list,\n no_grad_set)\n program = loss.block.program\n program_list = self._split_program(program, self._cut_list)\n for p in program_list:\n self._create_vars(p[\"program\"].block(0), program)\n whole_parameters = [e.name for e in program.block(0).all_parameters()]\n param_need_sync = []\n for i, section_p in enumerate(program_list):\n if not isinstance(self._place_list[i], core.CUDAPlace):\n continue\n section_var = [e for e in section_p[\"program\"].block(0).vars]\n for p in section_var:\n if p in whole_parameters:\n param_need_sync.append(p)\n program._pipeline_opt = {\n \"trainer\": \"PipelineTrainer\",\n \"device_worker\": \"Section\",\n \"section_program_list\": program_list,\n \"place_list\": self._place_list,\n \"concurrency_list\": self._concurrency_list,\n \"queue_size\": self._queue_size,\n \"start_cpu_core_id\": self._start_cpu_core_id,\n \"sync_steps\": self._sync_steps,\n \"param_need_sync\": param_need_sync\n }\n\n\nclass LookaheadOptimizer(object):\n \"\"\"\n This implements the Lookahead optimizer of the\n paper : https://arxiv.org/abs/1907.08610.\n\n Lookahead keeps two sets of params: the fast_params and\n the slow_params. inner_optimizer update fast_params every \n training step. Lookahead updates the slow_params and fast_params \n every k training steps as follows:\n\n .. 
math::\n \n slow\\_param_t &= slow\\_param_{t-1} + \\\\alpha * (fast\\_param_{t-1} - slow\\_param_{t-1})\n\t\n\tfast\\_param_t &= slow\\_param_t\n\n Args:\n inner_optimizer (Optimizer): The optimizer that update fast params step by step. \n alpha (float): The learning rate of Lookahead.\n k (int): The slow params is updated every k steps.\n\n Examples:\n .. code-block:: python\n\n import paddle\n import paddle.fluid as fluid\n import numpy as np\n\n\t x = fluid.layers.data(name='x', shape=[2], dtype='float32')\n\t label = fluid.layers.data(name=\"label\", shape=[1], dtype=\"int64\")\n\t y = fluid.layers.fc(input=[x], size=2, act=\"softmax\")\n\t loss = fluid.layers.cross_entropy(input=y, label=label)\n\t loss = fluid.layers.mean(x=loss)\n\t sgd = fluid.optimizer.SGD(learning_rate=0.01)\n\t optimizer = fluid.optimizer.LookaheadOptimizer(sgd,\n alpha=0.5,\n k=5)\n\t optimizer.minimize(loss)\n\t main_program = fluid.default_main_program()\n\t place = fluid.CPUPlace()\n\t exe = fluid.Executor(place)\n\t exe.run(fluid.default_startup_program())\n\n\t feeder = fluid.DataFeeder(feed_list=[x, label], place=place)\n\n\t step = 0\n while(step < 10):\n step += 1\n\t\texe.run(fluid.default_main_program(),\n \tfeed=feeder.feed(batch_data))\n\n \"\"\"\n\n def __init__(self, inner_optimizer, alpha=0.5, k=5):\n\n assert (inner_optimizer is not None), \"inner optimizer can not be None\"\n assert (\n 0.0 <= alpha <= 1.0\n ), \"alpha should be larger or equal to 0.0, and less or equal than 1.0\"\n assert (isinstance(k, int) and k > 0), \"k should be a positive integer\"\n\n self.inner_optimizer = inner_optimizer\n self.alpha = alpha\n self.k = k\n self.type = \"lookahead\"\n\n def minimize(self, loss, startup_program=None):\n\n # Apply inner optimizer to the main_program\n mini_out = self.inner_optimizer.minimize(\n loss, startup_program=startup_program)\n\n # Get startup_program and main_program\n if startup_program is None:\n startup_program = default_startup_program()\n main_block 
= loss.block\n\n # add some vars to the main_program\n params = [param.name for param in main_block.all_parameters()]\n param_to_slow = {}\n for param in params:\n fast_var = main_block.var(param)\n assert (fast_var is not None)\n slow_var = main_block.create_var(\n name=param + \"@SLOW\",\n shape=fast_var.shape,\n dtype=fast_var.dtype,\n persistable=True)\n param_to_slow[param] = slow_var\n\n # add some vars to the startup_program\n startup_block = startup_program.global_block()\n for param in params:\n fast_var = startup_block.var(param)\n assert (fast_var is not None)\n slow_var = startup_block.create_var(\n name=param + \"@SLOW\",\n shape=fast_var.shape,\n dtype=fast_var.dtype,\n persistable=True)\n\n startup_block.append_op(\n type=\"assign\",\n inputs={\"X\": fast_var},\n outputs={\"Out\": slow_var})\n\n # Add Var k to main prog and startup prog\n k = layers.create_global_var(\n name=\"lookahead_k\",\n shape=[1],\n value=int(self.k),\n dtype='int32',\n persistable=True)\n\n # Add Var alpha to main prog and startup prog\n alpha = layers.create_global_var(\n name=\"lookahead_alpha\",\n shape=[1],\n value=float(self.alpha),\n dtype='float32',\n persistable=True)\n\n # Add Var step\n step = layers.create_global_var(\n name=\"lookahead_step\",\n shape=[1],\n value=int(0),\n dtype='int32',\n persistable=True)\n layers.increment(x=step, value=1.0, in_place=True)\n\n # lookahead\n zero_var = layers.fill_constant(shape=[1], dtype='float32', value=0.0)\n\n one_var = layers.fill_constant(shape=[1], dtype='float32', value=1.0)\n\n mod = layers.elementwise_mod(step, k)\n with layers.control_flow.Switch() as switch:\n with switch.case(mod == zero_var):\n for param_name in params:\n fast_var = main_block.var(param_name)\n slow_var = param_to_slow[param_name]\n tmp_var = layers.elementwise_add(\n layers.elementwise_mul(fast_var, alpha),\n layers.elementwise_mul(\n slow_var, layers.elementwise_sub(one_var, alpha)))\n layers.assign(input=tmp_var, output=slow_var)\n 
layers.assign(input=tmp_var, output=fast_var)\n with switch.default():\n pass\n return mini_out\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
clvrai/goal_prox_il
|
[
"7c809b2ee575a69a14997068db06f3c1f3c8bd08",
"7c809b2ee575a69a14997068db06f3c1f3c8bd08",
"7c809b2ee575a69a14997068db06f3c1f3c8bd08",
"7c809b2ee575a69a14997068db06f3c1f3c8bd08",
"7c809b2ee575a69a14997068db06f3c1f3c8bd08"
] |
[
"goal_prox/method/goal_gail_discriminator.py",
"goal_prox/envs/fetch/custom_push.py",
"d4rl/d4rl/hand_manipulation_suite/door_v0.py",
"rl-toolkit/rlf/baselines/vec_env/shmem_vec_env.py",
"rl-toolkit/rlf/baselines/common/running_mean_std.py"
] |
[
"import torch\n\nfrom rlf.algos.il.gaifo import GaifoDiscrim\nfrom rlf.algos.nested_algo import NestedAlgo\nfrom goal_prox.method.goal_gail_algo import GoalGAILAlgo\nfrom goal_prox.method.goal_gail_dataset import GoalGAILTrajDataset\n\n\nclass GoalGAIL(NestedAlgo):\n def __init__(self, agent_updater=GoalGAILAlgo(), get_discrim=None):\n super().__init__([GoalGAILDiscrim(get_discrim), agent_updater], 1)\n\n\nclass GoalGAILDiscrim(GaifoDiscrim):\n def _get_traj_dataset(self, traj_load_path):\n return GoalGAILTrajDataset(traj_load_path, self.args)\n\n def _trans_batches(self, expert_batch, agent_batch):\n return expert_batch, agent_batch\n\n def _get_sampler(self, storage):\n expert_experience = self.expert_dataset.get_generator(\n num_batch=1,\n batch_size=self.args.traj_batch_size,\n relabel_ob=storage.relabel_ob,\n is_reached=storage.is_reached,\n )\n agent_experience = storage.get_generator(\n num_batch=1, batch_size=self.args.traj_batch_size\n )\n return expert_experience, agent_experience\n\n def update(self, storage):\n self.update_i += 1\n\n storage.set_reward_function(self._compute_reward)\n\n if len(storage) < 1:\n return {}\n\n if self.args.goal_gail_weight == 0:\n return {}\n\n if self.update_i % self.args.goal_gail_update_every:\n return {}\n\n log_vals = self._update_reward_func(storage)\n\n return log_vals\n\n def _compute_reward(self, obs, next_obs):\n state = obs\n next_state = next_obs\n\n d_val = self.discrim_net(state, next_state)\n s = torch.sigmoid(d_val)\n eps = 1e-20\n if self.args.reward_type == 'airl':\n reward = (s + eps).log() - (1 - s + eps).log()\n elif self.args.reward_type == 'gail':\n reward = (s + eps).log()\n elif self.args.reward_type == 'raw':\n reward = d_val\n elif self.args.reward_type == 'gaifo':\n reward = -1.0 * (s + eps).log()\n else:\n raise ValueError(f\"Unrecognized reward type {self.args.reward_type}\")\n return reward\n\n def get_add_args(self, parser):\n super().get_add_args(parser)\n 
parser.add_argument('--goal-gail-update-every', type=int, default=10)\n",
"import os\nfrom gym import utils\nfrom gym.envs.robotics import fetch_env\nimport numpy as np\nfrom goal_prox.envs.holdout_sampler import HoldoutSampler, LineHoldoutSampler\nfrom goal_prox.envs.old_holdout_sampler import OldHoldoutSampler\n\n\n# Ensure we get the path separator correct on windows\nMODEL_XML_PATH = os.path.join('fetch', 'push.xml')\n\nY_NOISE = 0.02\nX_NOISE = 0.05\nOBJ_X_NOISE = 0.05\nOFFSET = 0.10\n\n\nclass FetchPushEnvCustom(fetch_env.FetchEnv, utils.EzPickle):\n def __init__(self, reward_type='dense'):\n initial_qpos = {\n 'robot0:slide0': 0.405,\n 'robot0:slide1': 0.48,\n 'robot0:slide2': 0.0,\n 'object0:joint': [1.25, 0.53, 0.4, 1., 0., 0., 0.],\n }\n self.coverage = 1.0\n self.goal_noise = True\n self.rnd_gen = False\n self.set_noise_ratio(1.0, 1.0)\n fetch_env.FetchEnv.__init__(\n self, MODEL_XML_PATH, has_object=True, block_gripper=True, n_substeps=20,\n gripper_extra_height=0.0, target_in_the_air=False, target_offset=0,\n # The ranges shouldn't matter because we sample ourselves\n obj_range=0.1, target_range=0, distance_threshold=0.05,\n initial_qpos=initial_qpos, reward_type=reward_type)\n utils.EzPickle.__init__(self)\n\n def set_noise_ratio(self, noise_ratio, goal_noise_ratio):\n self.obj_sampler = OldHoldoutSampler([-noise_ratio * OBJ_X_NOISE, 0],\n [noise_ratio * OBJ_X_NOISE, noise_ratio * Y_NOISE * 2], 4)\n self.goal_sampler = OldHoldoutSampler(\n [-goal_noise_ratio*X_NOISE, -goal_noise_ratio*Y_NOISE * 2],\n [goal_noise_ratio*X_NOISE, 0], 4)\n # self.obj_sampler = OldHoldoutSampler([-noise_ratio * OBJ_X_NOISE, -noise_ratio * Y_NOISE],\n # [noise_ratio * OBJ_X_NOISE, noise_ratio * Y_NOISE], 4)\n # self.goal_sampler = OldHoldoutSampler(\n # [-goal_noise_ratio*X_NOISE, -goal_noise_ratio*Y_NOISE],\n # [goal_noise_ratio*X_NOISE, goal_noise_ratio*Y_NOISE], 4)\n\n def _get_obs(self):\n obs = super()._get_obs()\n obs['observation'] = np.concatenate([obs['observation'],\n obs['desired_goal']])\n return obs\n\n def relabel_ob(self, 
ob_current, ob_future):\n import torch\n\n if isinstance(ob_current, torch.Tensor):\n return torch.cat([ob_current[:-3], ob_future[-3:]])\n return np.concatenate([ob_current[:-3], ob_future[-3:]])\n\n def is_reached(self, ob):\n import torch\n\n if isinstance(ob, torch.Tensor):\n ob = ob.cpu()\n dist = np.linalg.norm(ob[-3:] - ob[3:6])\n return float(dist < self.distance_threshold)\n\n def _reset_sim(self):\n self.sim.set_state(self.initial_state)\n\n # Randomize start position of object.\n if self.has_object:\n object_xpos = self.initial_gripper_xpos[:2] + np.array([0.0, OFFSET])\n object_xpos += self.obj_sampler.sample(self.coverage,\n self.np_random)\n\n object_qpos = self.sim.data.get_joint_qpos('object0:joint')\n assert object_qpos.shape == (7,)\n object_qpos[:2] = object_xpos\n self.sim.data.set_joint_qpos('object0:joint', object_qpos)\n\n self.sim.forward()\n return True\n\n def _sample_goal(self):\n goal = self.initial_gripper_xpos[:3] + np.array([0.0, -1*OFFSET, 0.0])\n if self.goal_noise:\n goal[:2]+= self.goal_sampler.sample(self.coverage, self.np_random)\n\n goal += self.target_offset\n goal[2] = self.height_offset\n return goal.copy()\n\n def _viewer_setup(self):\n body_id = self.sim.model.body_name2id('robot0:gripper_link')\n lookat = self.sim.data.body_xpos[body_id]\n lookat = [1.34193362, 0.74910034, 0.55472272]\n for idx, value in enumerate(lookat):\n self.viewer.cam.lookat[idx] = value\n self.viewer.cam.distance = 1.3\n self.viewer.cam.azimuth = 132\n self.viewer.cam.elevation = -14.\n\n def _render_callback(self):\n # Visualize target.\n sites_offset = (self.sim.data.site_xpos - self.sim.model.site_pos).copy()\n site_id = self.sim.model.site_name2id('target0')\n self.sim.model.site_pos[site_id] = self.goal - sites_offset[0]\n self.sim.forward()\n\nclass FetchDebugPushEnv(FetchPushEnvCustom):\n def set_noise_ratio(self, noise_ratio, goal_noise_ratio):\n noise_ratio *= 1\n y_noise_scale = 0.15 / (noise_ratio * Y_NOISE)\n #y_noise_scale = 1.0\n 
self.obj_sampler = LineHoldoutSampler(\n [-noise_ratio * OBJ_X_NOISE, -y_noise_scale*noise_ratio * Y_NOISE],\n [noise_ratio * OBJ_X_NOISE, y_noise_scale*noise_ratio * Y_NOISE])\n\n self.goal_sampler = HoldoutSampler(\n [-goal_noise_ratio*X_NOISE, -goal_noise_ratio*Y_NOISE],\n [goal_noise_ratio*X_NOISE, goal_noise_ratio*Y_NOISE], 1, True)\n\n\n",
"import numpy as np\nfrom gym import utils\nfrom gym import spaces\nfrom mjrl.envs import mujoco_env\nfrom mujoco_py import MjViewer\nfrom d4rl import offline_env\nimport os\n\nADD_BONUS_REWARDS = True\n\nclass DoorEnvV0(mujoco_env.MujocoEnv, utils.EzPickle, offline_env.OfflineEnv):\n def __init__(self, **kwargs):\n offline_env.OfflineEnv.__init__(self, **kwargs)\n self.door_hinge_did = 0\n self.door_bid = 0\n self.grasp_sid = 0\n self.handle_sid = 0\n curr_dir = os.path.dirname(os.path.abspath(__file__))\n mujoco_env.MujocoEnv.__init__(self, curr_dir+'/assets/DAPG_door.xml', 5)\n\n # Override action_space to -1, 1\n self.action_space = spaces.Box(low=-1.0, high=1.0, dtype=np.float32, shape=self.action_space.shape)\n\n # change actuator sensitivity\n self.sim.model.actuator_gainprm[self.sim.model.actuator_name2id('A_WRJ1'):self.sim.model.actuator_name2id('A_WRJ0')+1,:3] = np.array([10, 0, 0])\n self.sim.model.actuator_gainprm[self.sim.model.actuator_name2id('A_FFJ3'):self.sim.model.actuator_name2id('A_THJ0')+1,:3] = np.array([1, 0, 0])\n self.sim.model.actuator_biasprm[self.sim.model.actuator_name2id('A_WRJ1'):self.sim.model.actuator_name2id('A_WRJ0')+1,:3] = np.array([0, -10, 0])\n self.sim.model.actuator_biasprm[self.sim.model.actuator_name2id('A_FFJ3'):self.sim.model.actuator_name2id('A_THJ0')+1,:3] = np.array([0, -1, 0])\n\n utils.EzPickle.__init__(self)\n ob = self.reset_model()\n self.act_mid = np.mean(self.model.actuator_ctrlrange, axis=1)\n self.act_rng = 0.5*(self.model.actuator_ctrlrange[:,1]-self.model.actuator_ctrlrange[:,0])\n self.door_hinge_did = self.model.jnt_dofadr[self.model.joint_name2id('door_hinge')]\n self.grasp_sid = self.model.site_name2id('S_grasp')\n self.handle_sid = self.model.site_name2id('S_handle')\n self.door_bid = self.model.body_name2id('frame')\n\n def step(self, a):\n a = np.clip(a, -1.0, 1.0)\n try:\n a = self.act_mid + a*self.act_rng # mean center and scale\n except:\n a = a # only for the initialization phase\n 
self.do_simulation(a, self.frame_skip)\n ob = self.get_obs()\n handle_pos = self.data.site_xpos[self.handle_sid].ravel()\n palm_pos = self.data.site_xpos[self.grasp_sid].ravel()\n door_pos = self.data.qpos[self.door_hinge_did]\n\n # get to handle\n reward = -0.1*np.linalg.norm(palm_pos-handle_pos)\n # open door\n reward += -0.1*(door_pos - 1.57)*(door_pos - 1.57)\n # velocity cost\n reward += -1e-5*np.sum(self.data.qvel**2)\n\n if ADD_BONUS_REWARDS:\n # Bonus\n if door_pos > 0.2:\n reward += 2\n if door_pos > 1.0:\n reward += 8\n if door_pos > 1.35:\n reward += 10\n\n goal_achieved = True if door_pos >= 1.35 else False\n\n return ob, reward, False, dict(goal_achieved=goal_achieved)\n\n def get_obs(self):\n # qpos for hand\n # xpos for obj\n # xpos for target\n qp = self.data.qpos.ravel()\n handle_pos = self.data.site_xpos[self.handle_sid].ravel()\n palm_pos = self.data.site_xpos[self.grasp_sid].ravel()\n door_pos = np.array([self.data.qpos[self.door_hinge_did]])\n if door_pos > 1.0:\n door_open = 1.0\n else:\n door_open = -1.0\n latch_pos = qp[-1]\n return np.concatenate([qp[1:-2], [latch_pos], door_pos, palm_pos, handle_pos, palm_pos-handle_pos, [door_open]])\n\n def reset_model(self):\n qp = self.init_qpos.copy()\n qv = self.init_qvel.copy()\n self.set_state(qp, qv)\n\n self.model.body_pos[self.door_bid,0] = self.np_random.uniform(low=-0.3, high=-0.2)\n self.model.body_pos[self.door_bid,1] = self.np_random.uniform(low=0.25, high=0.35)\n self.model.body_pos[self.door_bid,2] = self.np_random.uniform(low=0.252, high=0.35)\n self.sim.forward()\n return self.get_obs()\n\n def get_env_state(self):\n \"\"\"\n Get state of hand as well as objects and targets in the scene\n \"\"\"\n qp = self.data.qpos.ravel().copy()\n qv = self.data.qvel.ravel().copy()\n door_body_pos = self.model.body_pos[self.door_bid].ravel().copy()\n return dict(qpos=qp, qvel=qv, door_body_pos=door_body_pos)\n\n def set_env_state(self, state_dict):\n \"\"\"\n Set the state which includes hand as well 
as objects and targets in the scene\n \"\"\"\n qp = state_dict['qpos']\n qv = state_dict['qvel']\n self.set_state(qp, qv)\n self.model.body_pos[self.door_bid] = state_dict['door_body_pos']\n self.sim.forward()\n\n def mj_viewer_setup(self):\n self.viewer = MjViewer(self.sim)\n self.viewer.cam.azimuth = 90\n self.sim.forward()\n self.viewer.cam.distance = 1.5\n\n def evaluate_success(self, paths):\n num_success = 0\n num_paths = len(paths)\n # success if door open for 25 steps\n for path in paths:\n if np.sum(path['env_infos']['goal_achieved']) > 25:\n num_success += 1\n success_percentage = num_success*100.0/num_paths\n return success_percentage\n\n def set_noise_ratio(self, noise_ratio, goal_noise_ratio):\n pass\n",
"\"\"\"\nAn interface for asynchronous vectorized environments.\n\"\"\"\n\nimport multiprocessing as mp\nimport numpy as np\nfrom .vec_env import VecEnv, CloudpickleWrapper, clear_mpi_env_vars\nimport ctypes\nfrom rlf.baselines import logger\nfrom collections.abc import Iterable\n\nfrom .util import dict_to_obs, obs_space_info, obs_to_dict\n\n_NP_TO_CT = {np.float32: ctypes.c_float,\n np.float64: ctypes.c_double,\n np.int32: ctypes.c_int32,\n np.int8: ctypes.c_int8,\n np.uint8: ctypes.c_char,\n bool: ctypes.c_bool}\n\n\nclass ShmemVecEnv(VecEnv):\n \"\"\"\n Optimized version of SubprocVecEnv that uses shared variables to communicate observations.\n \"\"\"\n\n def __init__(self, env_fns, spaces=None, context='spawn'):\n \"\"\"\n If you don't specify observation_space, we'll have to create a dummy\n environment to get it.\n \"\"\"\n ctx = mp.get_context(context)\n if spaces:\n observation_space, action_space = spaces\n else:\n # Was very annoying to see this every single time.\n #logger.log('Creating dummy env object to get spaces')\n with logger.scoped_configure(format_strs=[]):\n dummy = env_fns[0]()\n observation_space, action_space = dummy.observation_space, dummy.action_space\n dummy.close()\n del dummy\n VecEnv.__init__(self, len(env_fns), observation_space, action_space)\n self.obs_keys, self.obs_shapes, self.obs_dtypes = obs_space_info(observation_space)\n self.obs_bufs = [\n {k: ctx.Array(_NP_TO_CT[self.obs_dtypes[k].type], int(np.prod(self.obs_shapes[k]))) for k in self.obs_keys}\n for _ in env_fns]\n self.parent_pipes = []\n self.procs = []\n with clear_mpi_env_vars():\n for env_fn, obs_buf in zip(env_fns, self.obs_bufs):\n wrapped_fn = CloudpickleWrapper(env_fn)\n parent_pipe, child_pipe = ctx.Pipe()\n proc = ctx.Process(target=_subproc_worker,\n args=(child_pipe, parent_pipe, wrapped_fn, obs_buf, self.obs_shapes, self.obs_dtypes, self.obs_keys))\n proc.daemon = True\n self.procs.append(proc)\n self.parent_pipes.append(parent_pipe)\n proc.start()\n 
child_pipe.close()\n self.waiting_step = False\n self.viewer = None\n\n def reset(self):\n if self.waiting_step:\n logger.warn('Called reset() while waiting for the step to complete')\n self.step_wait()\n for pipe in self.parent_pipes:\n pipe.send(('reset', None))\n return self._decode_obses([pipe.recv() for pipe in self.parent_pipes])\n\n def step_async(self, actions):\n assert len(actions) == len(self.parent_pipes)\n for pipe, act in zip(self.parent_pipes, actions):\n pipe.send(('step', act))\n\n def step_wait(self):\n outs = [pipe.recv() for pipe in self.parent_pipes]\n obs, rews, dones, infos = zip(*outs)\n return self._decode_obses(obs), np.array(rews), np.array(dones), infos\n\n def close_extras(self):\n if self.waiting_step:\n self.step_wait()\n for pipe in self.parent_pipes:\n pipe.send(('close', None))\n for pipe in self.parent_pipes:\n pipe.recv()\n pipe.close()\n for proc in self.procs:\n proc.join()\n\n def get_images(self, mode='human', **kwargs):\n N = len(self.parent_pipes)\n all_pipe_kwargs = []\n for i in range(N):\n pipe_kwargs = {}\n for k, v in kwargs.items():\n if isinstance(v, Iterable):\n pipe_kwargs[k] = kwargs[k][i]\n else:\n pipe_kwargs[k] = kwargs[k]\n all_pipe_kwargs.append(pipe_kwargs)\n\n for pipe, p_kwargs in zip(self.parent_pipes, all_pipe_kwargs):\n pipe.send(('render', (mode, p_kwargs)))\n return [pipe.recv() for pipe in self.parent_pipes]\n\n def _decode_obses(self, obs):\n result = {}\n for k in self.obs_keys:\n bufs = [b[k] for b in self.obs_bufs]\n o = [np.frombuffer(b.get_obj(), dtype=self.obs_dtypes[k]).reshape(self.obs_shapes[k]) for b in bufs]\n result[k] = np.array(o)\n return dict_to_obs(result)\n\n\ndef _subproc_worker(pipe, parent_pipe, env_fn_wrapper, obs_bufs, obs_shapes, obs_dtypes, keys):\n \"\"\"\n Control a single environment instance using IPC and\n shared memory.\n \"\"\"\n def _write_obs(maybe_dict_obs):\n flatdict = obs_to_dict(maybe_dict_obs)\n for k in keys:\n dst = obs_bufs[k].get_obj()\n dst_np = 
np.frombuffer(dst, dtype=obs_dtypes[k]).reshape(obs_shapes[k]) # pylint: disable=W0212\n np.copyto(dst_np, flatdict[k])\n\n env = env_fn_wrapper.x()\n parent_pipe.close()\n try:\n while True:\n cmd, data = pipe.recv()\n if cmd == 'reset':\n pipe.send(_write_obs(env.reset()))\n elif cmd == 'step':\n obs, reward, done, info = env.step(data)\n if done:\n final_obs = obs\n if isinstance(obs, dict):\n final_obs = obs['observation']\n info['final_obs'] = final_obs\n obs = env.reset()\n pipe.send((_write_obs(obs), reward, done, info))\n elif cmd == 'render':\n pipe.send(env.render(mode=data[0], **data[1]))\n elif cmd == 'close':\n pipe.send(None)\n break\n else:\n raise RuntimeError('Got unrecognized cmd %s' % cmd)\n except KeyboardInterrupt:\n print('ShmemVecEnv worker: got KeyboardInterrupt')\n finally:\n env.close()\n",
"import numpy as np\n\nclass RunningMeanStd(object):\n # https://en.wikipedia.org/wiki/Algorithms_for_calculating_variance#Parallel_algorithm\n def __init__(self, epsilon=1e-4, shape=()):\n # Note: This was float64 earlier, but we changed it to float32\n self.mean = np.zeros(shape, 'float32')\n self.var = np.ones(shape, 'float32')\n self.count = epsilon\n\n def update(self, x):\n batch_mean = np.mean(x, axis=0, dtype='float32')\n batch_var = np.var(x, axis=0, dtype='float32')\n batch_count = x.shape[0]\n self.update_from_moments(batch_mean, batch_var, batch_count)\n\n def update_from_moments(self, batch_mean, batch_var, batch_count):\n self.mean, self.var, self.count = update_mean_var_count_from_moments(\n self.mean, self.var, self.count, batch_mean, batch_var, batch_count)\n\n def __str__(self):\n return 'Mean: %s, Var: %s, Count: %i' % (str(list(self.mean)), str(list(self.var)), self.count)\n\ndef update_mean_var_count_from_moments(mean, var, count, batch_mean, batch_var, batch_count):\n delta = batch_mean - mean\n tot_count = count + batch_count\n\n new_mean = mean + delta * batch_count / tot_count\n m_a = var * count\n m_b = batch_var * batch_count\n M2 = m_a + m_b + np.square(delta, dtype='float32') * count * batch_count / tot_count\n new_var = M2 / tot_count\n new_count = tot_count\n\n return new_mean, new_var, new_count\n"
] |
[
[
"torch.sigmoid"
],
[
"numpy.concatenate",
"numpy.array",
"numpy.linalg.norm",
"torch.cat"
],
[
"numpy.clip",
"numpy.linalg.norm",
"numpy.concatenate",
"numpy.mean",
"numpy.array",
"numpy.sum"
],
[
"numpy.copyto",
"numpy.frombuffer",
"numpy.array",
"numpy.prod"
],
[
"numpy.square",
"numpy.ones",
"numpy.mean",
"numpy.var",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [
"1.10",
"1.12",
"1.11",
"1.19",
"1.24",
"1.13",
"1.16",
"1.9",
"1.18",
"1.23",
"1.21",
"1.22",
"1.20",
"1.7",
"1.15",
"1.14",
"1.17",
"1.8"
],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Tommy-Ngx/AutoGradingOA
|
[
"5e69bd38abaf01f03d8d837da68701a86bac1bb0"
] |
[
"ClsKL/vis/knee_tsne.py"
] |
[
"# -*- coding: utf-8 -*-\n\nimport os, sys, pdb\nimport numpy as np\nimport deepdish as dd\nfrom time import time\nimport matplotlib.pyplot as plt\nfrom sklearn import manifold\nfrom yellowbrick.text import TSNEVisualizer\n\n\n# knee = dd.io.load('./data/feas1646_auto.h5')\nknee = dd.io.load('./data/tsne/vgg19_feas1656_manual.h5')\nX = knee[\"data\"]\ny = knee[\"target\"]\n\nn_samples, n_features = X.shape\n\ntsne = manifold.TSNE(n_components=2, perplexity=20, early_exaggeration=4.0, learning_rate=1000, n_iter=1000,\n n_iter_without_progress=50, min_grad_norm=0, init='pca', method='exact', verbose=2)\nY = tsne.fit_transform(X)\n\n\nplt.figure(figsize=(6, 5))\ncolors = ['b', 'g', 'r', 'y', 'k']\ntarget_ids = [0, 1, 2, 3, 4]\ntarget_labels = [\"Grade 0\", \"Grade 1\", \"Grade 2\", \"Grade 3\", \"Grade 4\"]\nfor i, c, label in zip(target_ids, colors, target_labels):\n newY = np.array([Y[ind] for ind, e in enumerate(y) if e==i])\n plt.scatter(newY[:, 0], newY[:, 1], c=c, label=label)\nplt.legend()\nplt.title(\"Features of VGG-19-Ordinal on Manual\")\nplt.savefig(\"vgg19_tsne.pdf\")\n# plt.tight_layout()\nplt.show()\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.title",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.savefig",
"sklearn.manifold.TSNE",
"matplotlib.pyplot.show",
"matplotlib.pyplot.figure"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
DeFi-Analytics/DeFi-Analytics
|
[
"25fa0588758313c6a207848080a5f2d994316a24"
] |
[
"script4Task/getDEXPriceMinutely.py"
] |
[
"import requests\nimport pandas as pd\nimport numpy as np\nfrom pycoingecko import CoinGeckoAPI\nimport time\nfrom datetime import datetime\n\n\nscriptPath = __file__\npath = scriptPath[:-35] + '/data/'\nfilepath = path + 'LMPoolData_ShortTerm.csv'\n\nwhile True: \n start_time = pd.Timestamp.now()\n print('Get data at '+str(start_time)+' ...')\n requests.adapters.DEFAULT_RETRIES = 5\n link='https://api.defichain.io/v1/listpoolpairs?start=0&limit=500&network=mainnet&including_start=false'\n siteContent = requests.get(link)\n \n# s = requests.Session()\n# s.mount(link, HTTPAdapter(max_retries=5))\n\n\n dfLMPoolData = pd.read_json(siteContent.text).transpose()\n dfLMPoolData.drop(['name', 'status','tradeEnabled','ownerAddress','blockCommissionA',\n 'blockCommissionB','rewardPct','creationTx','creationHeight'], axis=1,inplace=True)\n cg = CoinGeckoAPI()\n DFIData = cg.get_price(ids='defichain', vs_currencies=['btc','eth','usd'])\n \n DogeCoinData = cg.get_price(ids='dogecoin', vs_currencies=['usd'])\n dogeDFIPrice = DFIData['defichain']['usd']/DogeCoinData['dogecoin']['usd']\n LiteCoinData = cg.get_price(ids='litecoin', vs_currencies=['usd'])\n ltcDFIPrice = DFIData['defichain']['usd']/LiteCoinData['litecoin']['usd']\n BCHCoinData = cg.get_price(ids='bitcoin-cash', vs_currencies=['usd'])\n bchDFIPrice = DFIData['defichain']['usd']/BCHCoinData['bitcoin-cash']['usd']\n USDCCoinData = cg.get_price(ids='usd-coin', vs_currencies=['usd'])\n USDCDFIPrice = DFIData['defichain']['usd'] / USDCCoinData['usd-coin']['usd']\n \n dfLMPoolData['DFIPrices'] = None\n dfLMPoolData.loc[4,'DFIPrices'] = DFIData['defichain']['eth']\n dfLMPoolData.loc[5,'DFIPrices'] = DFIData['defichain']['btc']\n dfLMPoolData.loc[6,'DFIPrices'] = DFIData['defichain']['usd']\n dfLMPoolData.loc[8,'DFIPrices'] = dogeDFIPrice\n dfLMPoolData.loc[10,'DFIPrices'] = ltcDFIPrice\n dfLMPoolData.loc[12,'DFIPrices'] = bchDFIPrice\n dfLMPoolData.loc[14, 'DFIPrices'] = USDCDFIPrice\n dfLMPoolData['Time'] = start_time\n 
\n # prices from Bittrex\n link='https://api.bittrex.com/v3/markets/tickers'\n siteContent = requests.get(link) \n dfBittrexTicker = pd.read_json(siteContent.text) \n dfLMPoolData['DFIPricesBittrex'] = None\n dfLMPoolData.loc[5,'DFIPricesBittrex'] = dfBittrexTicker[dfBittrexTicker['symbol']=='DFI-BTC']['lastTradeRate'].values[0]\n dfLMPoolData.loc[6,'DFIPricesBittrex'] = dfBittrexTicker[dfBittrexTicker['symbol']=='DFI-USDT']['lastTradeRate'].values[0]\n \n dfOldLMPoolData = pd.read_csv(filepath,index_col=0)\n dfLMPoolData = dfOldLMPoolData.append(dfLMPoolData, sort=False)\n dfLMPoolData = dfLMPoolData[-540:]\n dfLMPoolData.reset_index(inplace=True, drop=True)\n \n dfLMPoolData.to_csv(filepath)\n \n # wait time before run again\n nowTimestamp = datetime.now()\n waitTime = 60-nowTimestamp.second\n print('...finished. Timestamp: '+str(datetime.now())+' wait-time:'+str(waitTime))\n time.sleep(waitTime)"
] |
[
[
"pandas.read_csv",
"pandas.Timestamp.now",
"pandas.read_json"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
kimig001/openpilot
|
[
"4af0c7f21c801ea23cc0e7fa5a86a577180755a4"
] |
[
"selfdrive/ntune.py"
] |
[
"import os\nimport fcntl\nimport signal\nimport json\nimport numpy as np\n\nfrom selfdrive.hardware import TICI\n\nCONF_PATH = '/data/ntune/'\nCONF_LQR_FILE = '/data/ntune/lat_lqr.json'\n\nntunes = {}\n\ndef file_watch_handler(signum, frame):\n global ntunes\n for ntune in ntunes.values():\n ntune.handle()\n\nclass nTune():\n def __init__(self, CP=None, controller=None, group=None):\n\n self.invalidated = False\n self.CP = CP\n self.lqr = None\n self.group = group\n self.config = {}\n\n if \"LatControlLQR\" in str(type(controller)):\n self.lqr = controller\n self.file = CONF_LQR_FILE\n self.lqr.A = np.array([0., 1., -0.22619643, 1.21822268]).reshape((2, 2))\n self.lqr.B = np.array([-1.92006585e-04, 3.95603032e-05]).reshape((2, 1))\n self.lqr.C = np.array([1., 0.]).reshape((1, 2))\n self.lqr.K = np.array([-110., 451.]).reshape((1, 2))\n self.lqr.L = np.array([0.33, 0.318]).reshape((2, 1))\n else:\n self.file = CONF_PATH + group + \".json\"\n\n if not os.path.exists(CONF_PATH):\n os.makedirs(CONF_PATH)\n\n self.read()\n\n try:\n signal.signal(signal.SIGIO, file_watch_handler)\n fd = os.open(CONF_PATH, os.O_RDONLY)\n fcntl.fcntl(fd, fcntl.F_SETSIG, 0)\n fcntl.fcntl(fd, fcntl.F_NOTIFY, fcntl.DN_MODIFY | fcntl.DN_CREATE | fcntl.DN_MULTISHOT)\n except Exception as ex:\n print(\"exception\", ex)\n pass\n\n def handle(self):\n try:\n if os.path.getsize(self.file) > 0:\n with open(self.file, 'r') as f:\n self.config = json.load(f)\n\n if self.checkValid():\n self.write_config(self.config)\n\n self.invalidated = True\n\n except:\n pass\n\n def check(self): # called by LatControlLQR.update\n if self.invalidated:\n self.invalidated = False\n self.update()\n\n def read(self):\n success = False\n try:\n if os.path.getsize(self.file) > 0:\n with open(self.file, 'r') as f:\n self.config = json.load(f)\n\n if self.checkValid():\n self.write_config(self.config)\n self.update()\n success = True\n except:\n pass\n\n if not success:\n try:\n self.write_default()\n with open(self.file, 
'r') as f:\n self.config = json.load(f)\n if self.checkValid():\n self.write_config(self.config)\n self.update()\n except:\n pass\n\n def checkValue(self, key, min_, max_, default_):\n updated = False\n\n if key not in self.config:\n self.config.update({key: default_})\n updated = True\n elif min_ > self.config[key]:\n self.config.update({key: min_})\n updated = True\n elif max_ < self.config[key]:\n self.config.update({key: max_})\n updated = True\n\n return updated\n\n def checkValid(self):\n\n if self.lqr is not None:\n return self.checkValidLQR()\n elif self.group == \"common\":\n return self.checkValidCommon()\n else:\n return self.checkValidISCC()\n\n def update(self):\n\n if self.lqr is not None:\n self.updateLQR()\n\n def checkValidCommon(self):\n updated = False\n\n if self.checkValue(\"useLiveSteerRatio\", 0., 1., 1.):\n updated = True\n\n if self.checkValue(\"steerRatio\", 10.0, 20.0, 16.5):\n updated = True\n\n if self.checkValue(\"steerActuatorDelay\", 0., 0.8, 0.08):\n updated = True\n\n if self.checkValue(\"steerRateCost\", 0.1, 1.5, 0.4):\n updated = True\n\n if self.checkValue(\"cameraOffset\", -1.0, 1.0, -0.04 if TICI else 0.06):\n updated = True\n\n return updated\n\n def checkValidLQR(self):\n updated = False\n\n if self.checkValue(\"scale\", 500.0, 5000.0, 1800.0):\n updated = True\n\n if self.checkValue(\"ki\", 0.0, 0.2, 0.01):\n updated = True\n\n if self.checkValue(\"dcGain\", 0.002, 0.004, 0.0028):\n updated = True\n\n if self.checkValue(\"steerLimitTimer\", 0.5, 3.0, 2.5):\n updated = True\n\n return updated\n\n def checkValidISCC(self):\n updated = False\n\n if self.checkValue(\"sccGasFactor\", 0.5, 1.5, 1.10):\n updated = True\n\n if self.checkValue(\"sccBrakeFactor\", 0.5, 1.5, 1.08):\n updated = True\n\n if self.checkValue(\"sccCurvatureFactor\", 0.5, 1.5, 0.97):\n updated = True\n\n if self.checkValue(\"longitudinalActuatorDelayLowerBound\", 0.1, 1.5, 0.15):\n updated = True\n\n if 
self.checkValue(\"longitudinalActuatorDelayUpperBound\", 0.1, 1.5, 0.15):\n updated = True\n\n return updated\n\n def updateLQR(self):\n\n self.lqr.scale = float(self.config[\"scale\"])\n self.lqr.ki = float(self.config[\"ki\"])\n\n self.lqr.dc_gain = float(self.config[\"dcGain\"])\n\n self.lqr.sat_limit = float(self.config[\"steerLimitTimer\"])\n\n self.lqr.x_hat = np.array([[0], [0]])\n self.lqr.reset()\n\n def read_cp(self):\n\n try:\n if self.CP is not None:\n\n if self.CP.lateralTuning.which() == 'lqr' and self.lqr is not None:\n self.config[\"scale\"] = round(self.CP.lateralTuning.lqr.scale, 2)\n self.config[\"ki\"] = round(self.CP.lateralTuning.lqr.ki, 3)\n self.config[\"dcGain\"] = round(self.CP.lateralTuning.lqr.dcGain, 6)\n self.config[\"steerLimitTimer\"] = round(self.CP.steerLimitTimer, 2)\n self.config[\"steerMax\"] = round(self.CP.steerMaxV[0], 2)\n else:\n self.config[\"useLiveSteerRatio\"] = 1.\n self.config[\"steerRatio\"] = round(self.CP.steerRatio, 2)\n self.config[\"steerActuatorDelay\"] = round(self.CP.steerActuatorDelay, 2)\n self.config[\"steerRateCost\"] = round(self.CP.steerRateCost, 2)\n\n except:\n pass\n\n def write_default(self):\n\n try:\n self.read_cp()\n self.checkValid()\n self.write_config(self.config)\n except:\n pass\n\n def write_config(self, conf):\n try:\n with open(self.file, 'w') as f:\n json.dump(conf, f, indent=2, sort_keys=False)\n os.chmod(self.file, 0o666)\n except IOError:\n\n try:\n if not os.path.exists(CONF_PATH):\n os.makedirs(CONF_PATH)\n\n with open(self.file, 'w') as f:\n json.dump(conf, f, indent=2, sort_keys=False)\n os.chmod(self.file, 0o666)\n except:\n pass\n\ndef ntune_get(group, key):\n global ntunes\n if group not in ntunes:\n ntunes[group] = nTune(group=group)\n\n ntune = ntunes[group]\n\n if ntune.config == None or key not in ntune.config:\n ntune.read()\n\n v = ntune.config[key]\n\n if v is None:\n ntune.read()\n v = ntune.config[key]\n\n return v\n\ndef ntune_common_get(key):\n return 
ntune_get(\"common\", key)\n\ndef ntune_common_enabled(key):\n return ntune_common_get(key) > 0.5\n\ndef ntune_scc_get(key):\n return ntune_get(\"scc\", key)\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
BalazsSzekeres/python-template
|
[
"0ad886c06af522d34f9fe188ecfe466e8f8450dc"
] |
[
"src/foo/bar.py"
] |
[
"import numpy as np\nfrom math import factorial as std_factorial\n\n\ndef hello(name: str) -> str:\n return f\"Hello {name}!!\"\n\n\n# Implement factorial for numpy arrays:\nfactorial = np.vectorize(std_factorial)\n"
] |
[
[
"numpy.vectorize"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
areeq-hasan/qpu
|
[
"05b828eae18d5e53179b6cda5b2a97d3e16896a7"
] |
[
"qpu/__init__.py"
] |
[
"\"\"\"QuantumComputer Module\n\nDefines a quantum circuit with an input register and a memory register onto which\ninstructions can be encoded as a bitstring. The core quantum computer circuit executes\nthe instructions. The state of the memory register is read out and returned to the\nuser.\n\n\"\"\"\n\nimport numpy as np\nfrom qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit, Aer, execute\n\n\nclass QuantumComputer:\n \"\"\"\n QuantumComputer Class\n \"\"\"\n\n def __init__(self, data_size: int, addr_size: int):\n self.data_size = data_size\n self.addr_size = addr_size\n\n self.optype = QuantumRegister(1, name=\"optype\")\n self.opdata = QuantumRegister(self.data_size, name=\"opdata\")\n self.opaddr = QuantumRegister(self.addr_size, name=\"opaddr\")\n self.address = QuantumRegister(self.addr_size, name=\"address\")\n self.data = QuantumRegister(self.data_size, name=\"data\")\n\n self.meas = ClassicalRegister(self.addr_size + self.data_size, name=\"meas\")\n\n self.circuit = QuantumCircuit(\n self.address, self.data, self.opaddr, self.opdata, self.optype, self.meas\n )\n\n def program(self, program):\n \"\"\"\n Encode a program (set of instructions) onto the quantum computer.\n \"\"\"\n self.circuit.initialize(program + \"0\" * (self.data.size + self.address.size))\n self.circuit.barrier()\n\n def _init_memory(self):\n \"\"\"\n Initialize the memory register in uniform superposition for assignment.\n \"\"\"\n self.circuit.h(self.address)\n self.circuit.barrier()\n\n def _add_store_op(self):\n \"\"\"\n Add instruction handling to the quantum computer circuit for the store operation.\n \"\"\"\n self.circuit.h(self.address)\n self.circuit.barrier()\n\n self.circuit.x(self.opaddr)\n\n for bit in range(0, self.address.size, 1):\n self.circuit.mct(\n self.optype[:] + self.opaddr[bit : bit + 1],\n self.address[self.address.size - 1 - bit],\n )\n\n for bit in range(0, self.data.size, 1):\n self.circuit.mct(\n self.optype[:] + self.opdata[bit : bit + 1] + 
self.address[:],\n self.data[self.data.size - 1 - bit],\n )\n\n for bit in range(self.address.size - 1, -1, -1):\n self.circuit.mct(\n self.optype[:] + self.opaddr[bit : bit + 1],\n self.address[self.address.size - 1 - bit],\n )\n\n self.circuit.x(self.optype)\n\n self.circuit.barrier()\n\n def run(self):\n \"\"\"\n Add all supported instruction handlers to the quantum computer circuit.\n \"\"\"\n self._add_store_op()\n\n def get_state(self):\n \"\"\"\n Measure the state of the memory register and return the result.\n \"\"\"\n self.circuit.measure(self.address[:], self.meas[: self.addr_size])\n self.circuit.measure(\n self.data[:], self.meas[self.addr_size : self.addr_size + self.data_size]\n )\n\n self.circuit = self.circuit.reverse_bits()\n\n state = np.zeros(2 ** self.address.size)\n for reg in (\n execute(self.circuit.decompose(), Aer.get_backend(\"aer_simulator\"))\n .result()\n .get_counts()\n .keys()\n ):\n state[int(reg[: self.address.size], 2)] = int(reg[self.address.size :], 2)\n return state\n"
] |
[
[
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
rensutheart/flowdec
|
[
"bc817c71e00090061070f0cdeb4c80524f122950"
] |
[
"python/flowdec/psf.py"
] |
[
"\"\"\"PSF Generator Module based on Fast Gibson Lanni Approximation\n\nThis is the exact same implementation (used with permission from\nhttp://kmdouglass.github.io/posts/implementing-a-fast-gibson-lanni-psf-solver-in-python.html)\n\"\"\"\nimport argparse\nimport json\nfrom collections import OrderedDict\n\n\n# ##################\n# Gibson Lanni PSF #\n# ##################\n\n# Defaults set in python implementation\n# See: http://kmdouglass.github.io/posts/implementing-a-fast-gibson-lanni-psf-solver-in-python.html\n# # Image properties\n# # Size of the PSF array, pixels\n# size_x = 256\n# size_y = 256\n# size_z = 128\n#\n# # Precision control\n# num_basis = 100 # Number of rescaled Bessels that approximate the phase function\n# num_samples = 1000 # Number of pupil samples along radial direction\n# oversampling = 2 # Defines the upsampling ratio on the image space grid for computations\n#\n# # Microscope parameters\n# NA = 1.4\n# wavelength = 0.610 # microns\n# M = 100 # magnification\n# ns = 1.33 # specimen refractive index (RI)\n# ng0 = 1.5 # coverslip RI design value\n# ng = 1.5 # coverslip RI experimental value\n# ni0 = 1.5 # immersion medium RI design value\n# ni = 1.5 # immersion medium RI experimental value\n# ti0 = 150 # microns, working distance (immersion medium thickness) design value\n# tg0 = 170 # microns, coverslip thickness design value\n# tg = 170 # microns, coverslip thickness experimental value\n# res_lateral = 0.1 # microns\n# res_axial = 0.25 # microns\n# pZ = 2 # microns, particle distance from coverslip\n#\n# # Scaling factors for the Fourier-Bessel series expansion\n# min_wavelength = 0.436 # microns\n# scaling_factor = NA * (3 * np.arange(1, num_basis + 1) - 2) * min_wavelength / wavelength\n\n# Defaults set in docs within original Matlab implementation (MicroscPSF.m)\n# See: http://www.ee.cuhk.edu.hk/~tblu/monsite/phps/demos.php\n# % (1) image properties\n# % 'size' - the size of the 3D PSF, e.g. 
params.size = [256 256 128];\n# % (2) precision control\n# % 'numBasis' - the number of approximation basis, default '100'\n# % 'numSamp' - the number of sampling to determine the basis\n# % coefficients, default '1000'\n# % 'overSampling' - the oversampling ratio, default 2\n# % (3) microscope parameters\n# % 'NA' - numerical aperture of the microscope, default 1.4\n# % 'lambda' - Emission wavelength in vacuum, default 610nm\n# % 'M' - magnification factor, default 100\n# % 'ns' - specimen refractive index (RI), default 1.33\n# % 'ng0' - coverslip RI, design value, default 1.5\n# % 'ng' - coverslip RI, experimental, default 1.5\n# % 'ni0' - immersion RI, design value, default 1.5\n# % 'ni' - immersion RI, experimental, defualt 1.5\n# % 'ti0' - working distance, design, default 150um\n# % 'tg0' - coverslip thickness, design value, default 170um\n# % 'tg' - coverslip thickness, experimental, default 170um\n# % 'resLateral' - lateral pixel size, default 100nm\n# % 'resAxial' - axial pixel size, default 250nm\n# % 'pZ' - position of particle, default 2000nm\n\n\nGL_PSF_ARGS = [\n ['size_x', 256, \"Number of pixels in result (x dimension)\", \"Dimensions\"],\n ['size_y', 256, \"Number of pixels in result (y dimension)\", \"Dimensions\"],\n ['size_z', 128, \"Number of pixels in result (z dimension)\", \"Dimensions\"],\n ['na', 1.4, \"Numerical aperture of device\", \"Microscope Parameters\"],\n ['wavelength', .610, \"Wavelength of emitted light in vacuum (microns)\", \"Microscope Parameters\"],\n ['m', 100, \"Magnification factor\", \"Microscope Parameters\"],\n ['ns', 1.33, \"Specimen refractive index (RI)\", \"Microscope Parameters\"],\n ['ng0', 1.5, \"Coverslip RI, design value\", \"Microscope Parameters\"],\n ['ng', None, \"Coverslip RI, experimental (defaults to ng0 if not given)\", \"Microscope Parameters\"],\n ['ni0', 1.5, \"Immersion RI, design value\", \"Microscope Parameters\"],\n ['ni', None, \"Immersion RI, experimental (defaults to ni0 if not given)\", 
\"Microscope Parameters\"],\n ['ti0', 150, \"Working distance (microns)\", \"Microscope Parameters\"],\n ['tg0', 170, \"Coverslip thickness, design value (microns)\", \"Microscope Parameters\"],\n ['tg', None, \"Coverslip thickness, experimental (microns) (defaults to tg0 if not given)\", \"Microscope Parameters\"],\n ['res_lateral', 0.1, \"Lateral pizel size / resolution (microns)\", \"Microscope Parameters\"],\n ['res_axial', 0.25, \"Axial pizel size / resolution (microns)\", \"Microscope Parameters\"],\n ['pz', 0, \"Particle distance from coverslip (microns)\", \"Microscope Parameters\"],\n ['num_basis', 100, \"Number of rescaled Bessels that approximate the phase function\", \"Precision Parameters\"],\n ['num_samples', 1000, \"Number of pupil samples along radial direction\", \"Precision Parameters\"],\n ['oversampling',2, \"Defines the upsampling ratio on the image space grid for computations\",\n \"Precision Parameters\"],\n ['min_wavelength', 0.436,\"Reference wavelength used in computation of scaling factor (microns); \"\n \"See section titled 'B. Bessel series approximation' in [1] for more details\",\n \"Precision Parameters\"],\n]\n\n\nclass PSF(object):\n pass\n\n\nclass GibsonLanni(PSF):\n\n def __init__(self, **kwargs):\n \"\"\" Python implementation of fast Gibson-Lanni PSF approximation model\n\n This is based on [1] and was originally developed in Matlab before being\n ported to a Python implementation [2]. This implementation is used verbatim here\n with permission from the author Kyle Douglass.\n\n References:\n [1] - Li, J., Xue, F., & Blu, T. (2017). Fast and accurate three-dimensional point spread function\n computation for fluorescence microscopy. JOSA A, 34(6), 1029-1034.\n [2] - http://kmdouglass.github.io/posts/implementing-a-fast-gibson-lanni-psf-solver-in-python.html\n\n Args:\n See psf.GL_PSF_ARGS for a description of arguments applicable for this class. 
All of them\n have default values that can be overridden by keyword arguments here matching the argument name.\n Additionally, GibsonLanni.get_arg_parser().print_help() will display arguments, descriptions,\n and defaults in a readable form.\n \"\"\"\n args = GL_PSF_ARGS\n\n # Check to see if any arguments were given with invalid names\n known_args = [a[0] for a in args]\n bad_args = set(kwargs.keys()) - set(known_args)\n if len(bad_args) > 0:\n raise ValueError(\n 'The following arguments given are not valid: {}\\nValid argument names: {}'\n .format(bad_args, known_args)\n )\n\n # Assign configuration by resolving default arguments and those passed in\n self.config = OrderedDict({a[0]: a[1] for a in args})\n self.config.update(kwargs)\n\n def to_json(self):\n return json.dumps(self.config)\n\n def save(self, path):\n \"\"\"Save PSF configuration as json in the given file\"\"\"\n with open(path, 'w') as fd:\n json.dump(self.config, fd, indent=4, sort_keys=True)\n return self\n\n @staticmethod\n def load(path):\n \"\"\"Load a PSF object from a json configuration file\"\"\"\n with open(path, 'r') as fd:\n return GibsonLanni(**json.load(fd))\n\n @staticmethod\n def get_arg_parser():\n \"\"\" Get PSF argument parser and field descriptions \"\"\"\n parser = argparse.ArgumentParser(GibsonLanni.__doc__, formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n groups = OrderedDict({})\n for arg in GL_PSF_ARGS:\n group = arg[3]\n if not group in groups:\n groups[group] = parser.add_argument_group(group)\n groups[group].add_argument(\n '--{}'.format(arg[0].replace('_', '-')),\n default=arg[1],\n help=arg[2]\n )\n\n return parser\n\n def generate(self):\n import numpy as np\n import scipy.special\n from scipy.interpolate import interp1d\n\n # ################# #\n # Define Parameters #\n # ################# #\n\n size_x = self.config['size_x']\n size_y = self.config['size_y']\n size_z = self.config['size_z']\n NA = self.config['na']\n wavelength = 
self.config['wavelength']\n M = self.config['m']\n ns = self.config['ns']\n ng0 = self.config['ng0']\n ng = self.config['ng'] or ng0\n ni0 = self.config['ni0']\n ni = self.config['ni'] or ni0\n ti0 = self.config['ti0']\n tg0 = self.config['tg0']\n tg = self.config['tg'] or tg0\n res_lateral = self.config['res_lateral']\n res_axial = self.config['res_axial']\n pZ = self.config['pz']\n num_basis = self.config['num_basis']\n num_samples = self.config['num_samples']\n oversampling = self.config['oversampling']\n min_wavelength = self.config['min_wavelength']\n\n scaling_factor = NA * (3 * np.arange(1, num_basis + 1) - 2) * min_wavelength / wavelength\n\n # ############################# #\n # Create the coordinate systems #\n # ############################# #\n\n # Place the origin at the center of the final PSF array\n x0 = (size_x - 1) / 2\n y0 = (size_y - 1) / 2\n\n # Find the maximum possible radius coordinate of the PSF array by finding the distance\n # from the center of the array to a corner\n max_radius = round(np.sqrt((size_x - x0) * (size_x - x0) + (size_y - y0) * (size_y - y0))) + 1;\n\n # Radial coordinates, image space\n r = res_lateral * np.arange(0, oversampling * max_radius) / oversampling\n\n # Radial coordinates, pupil space\n a = min([NA, ns, ni, ni0, ng, ng0]) / NA\n rho = np.linspace(0, a, num_samples)\n\n # Stage displacements away from best focus\n z = res_axial * np.arange(-size_z / 2, size_z /2) + res_axial / 2\n\n # ######################################################## #\n # Approximate the pupil phase with a Fourier-Bessel series #\n # ######################################################## #\n\n # Define the wavefront aberration\n OPDs = pZ * np.sqrt(ns * ns - NA * NA * rho * rho) # OPD in the sample\n OPDi = (z.reshape(-1,1) + ti0) * np.sqrt(ni * ni - NA * NA * rho * rho) - ti0 * np.sqrt(ni0 * ni0 - NA * NA * rho * rho) # OPD in the immersion medium\n OPDg = tg * np.sqrt(ng * ng - NA * NA * rho * rho) - tg0 * np.sqrt(ng0 * ng0 - NA * NA 
* rho * rho) # OPD in the coverslip\n W = 2 * np.pi / wavelength * (OPDs + OPDi + OPDg)\n\n # Sample the phase\n # Shape is (number of z samples by number of rho samples)\n phase = np.cos(W) + 1j * np.sin(W)\n\n # Define the basis of Bessel functions\n # Shape is (number of basis functions by number of rho samples)\n J = scipy.special.jv(0, scaling_factor.reshape(-1, 1) * rho)\n\n # Compute the approximation to the sampled pupil phase by finding the least squares\n # solution to the complex coefficients of the Fourier-Bessel expansion.\n # Shape of C is (number of basis functions by number of z samples).\n # Note the matrix transposes to get the dimensions correct.\n C, residuals, _, _ = np.linalg.lstsq(J.T, phase.T, rcond=None)\n\n # ############### #\n # Compute the PSF #\n # ############### #\n b = 2 * np. pi * r.reshape(-1, 1) * NA / wavelength\n\n # Convenience functions for J0 and J1 Bessel functions\n J0 = lambda x: scipy.special.jv(0, x.tolist())\n J1 = lambda x: scipy.special.jv(1, x.tolist())\n\n # See equation 5 in Li, Xue, and Blu\n denom = scaling_factor * scaling_factor - b * b\n R = (scaling_factor * J1(scaling_factor * a) * J0(b * a) * a - b * J0(scaling_factor * a) * J1(b * a) * a)\n R /= denom\n\n # The transpose places the axial direction along the first dimension of the array, i.e. 
rows\n # This is only for convenience.\n PSF_rz = (np.abs(R.dot(C))**2).T\n\n # Normalize to the maximum value\n PSF_rz /= np.max(PSF_rz)\n\n # ############################################################# #\n # Resample the PSF onto a rotationally-symmetric Cartesian grid #\n # ############################################################# #\n\n # Create the fleshed-out xy grid of radial distances from the center\n xy = np.mgrid[0:size_y, 0:size_x]\n r_pixel = np.sqrt((xy[1] - x0) * (xy[1] - x0) + (xy[0] - y0) * (xy[0] - y0)) * res_lateral\n\n PSF = np.zeros((size_y, size_x, size_z))\n\n for z_index in range(PSF.shape[2]):\n # Interpolate the radial PSF function\n PSF_interp = interp1d(r.flatten(), PSF_rz[z_index, :].flatten())\n\n # Evaluate the PSF at each value of r_pixel\n PSF[:,:, z_index] = PSF_interp(r_pixel.ravel()).reshape(size_y, size_x)\n\n # **All lines below are changes to original implementation** #\n\n # Transform to [z, y, x] instead of [y, x, z]\n PSF = np.moveaxis(PSF, 2, 0)\n\n # Re-normalize to a max of 1\n return PSF / np.max(PSF)\n\n\n"
] |
[
[
"numpy.sqrt",
"numpy.linspace",
"numpy.arange",
"numpy.cos",
"numpy.sin",
"numpy.linalg.lstsq",
"numpy.max",
"numpy.moveaxis",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
huseinzol05/Hackathon-Huseinhouse
|
[
"6796af2fe02f10d7860ac8db27bd24fa27b8bb01",
"6796af2fe02f10d7860ac8db27bd24fa27b8bb01",
"6796af2fe02f10d7860ac8db27bd24fa27b8bb01"
] |
[
"Hackathon/Emotion-Music/utils_emotion.py",
"Hackathon/Vandalism-Detection/model/function.py",
"Hackathon/Emotion-Music/utils_person.py"
] |
[
"import numpy as np\nimport os\nfrom sklearn.preprocessing import LabelEncoder\n\ndef get_dataset():\n \n list_folder = os.listdir('data/')\n list_images = []\n for i in xrange(len(list_folder)):\n images = os.listdir('data/' + list_folder[i])\n for x in xrange(len(images)):\n image = [list_folder[i] + '/' + images[x], list_folder[i]]\n list_images.append(image)\n \n list_images = np.array(list_images)\n np.random.shuffle(list_images)\n \n label = np.unique(list_images[:, 1]).tolist()\n \n list_images[:, 1] = LabelEncoder().fit_transform(list_images[:, 1])\n \n return list_images, np.unique(list_images[:, 1]).shape[0], label",
"import tensorflow as tf\nimport custom_layers\nslim = tf.contrib.slim\n\ndef get_shape(x, rank=None):\n if x.get_shape().is_fully_defined():\n return x.get_shape().as_list()\n else:\n static_shape = x.get_shape()\n if rank is None:\n static_shape = static_shape.as_list()\n rank = len(static_shape)\n else:\n static_shape = x.get_shape().with_rank(rank).as_list()\n dynamic_shape = tf.unstack(tf.shape(x), rank)\n return [s if s is not None else d\n for s, d in zip(static_shape, dynamic_shape)]\n\ndef ssd_multibox_layer(inputs, num_classes, sizes, ratios = [1], normalization = -1, bn_normalization = False):\n net = inputs\n if normalization > 0:\n net = custom_layers.l2_normalization(net, scaling=True)\n # Number of anchors.\n num_anchors = len(sizes) + len(ratios)\n\n # Location.\n num_loc_pred = num_anchors * 4\n loc_pred = slim.conv2d(net, num_loc_pred, [3, 3], activation_fn=None,\n scope='conv_loc')\n loc_pred = custom_layers.channel_to_last(loc_pred)\n loc_pred = tf.reshape(loc_pred,\n get_shape(loc_pred, 4)[:-1]+[num_anchors, 4])\n # Class prediction.\n num_cls_pred = num_anchors * num_classes\n cls_pred = slim.conv2d(net, num_cls_pred, [3, 3], activation_fn=None,\n scope='conv_cls')\n cls_pred = custom_layers.channel_to_last(cls_pred)\n cls_pred = tf.reshape(cls_pred,\n get_shape(cls_pred, 4)[:-1]+[num_anchors, num_classes])\n return cls_pred, loc_pred\n\ndef bboxes_sort(scores, bboxes, top_k=400, scope=None):\n # Dictionaries as inputs.\n if isinstance(scores, dict) or isinstance(bboxes, dict):\n with tf.name_scope(scope, 'bboxes_sort_dict'):\n d_scores = {}\n d_bboxes = {}\n for c in scores.keys():\n s, b = bboxes_sort(scores[c], bboxes[c], top_k=top_k)\n d_scores[c] = s\n d_bboxes[c] = b\n return d_scores, d_bboxes\n\n # Tensors inputs.\n with tf.name_scope(scope, 'bboxes_sort', [scores, bboxes]):\n # Sort scores...\n scores, idxes = tf.nn.top_k(scores, k=top_k, sorted=True)\n\n # Trick to be able to use tf.gather: map for each element in the first dim.\n 
def fn_gather(bboxes, idxes):\n bb = tf.gather(bboxes, idxes)\n return [bb]\n r = tf.map_fn(lambda x: fn_gather(x[0], x[1]),\n [bboxes, idxes],\n dtype=[bboxes.dtype],\n parallel_iterations=10,\n back_prop=False,\n swap_memory=False,\n infer_shape=True)\n bboxes = r[0]\n return scores, bboxes\n\t\ndef bboxes_nms_batch(scores, bboxes, nms_threshold=0.5, keep_top_k=200, scope=None):\n # Dictionaries as inputs.\n if isinstance(scores, dict) or isinstance(bboxes, dict):\n with tf.name_scope(scope, 'bboxes_nms_batch_dict'):\n d_scores = {}\n d_bboxes = {}\n for c in scores.keys():\n s, b = bboxes_nms_batch(scores[c], bboxes[c],\n nms_threshold=nms_threshold,\n keep_top_k=keep_top_k)\n d_scores[c] = s\n d_bboxes[c] = b\n return d_scores, d_bboxes\n\n # Tensors inputs.\n with tf.name_scope(scope, 'bboxes_nms_batch'):\n r = tf.map_fn(lambda x: bboxes_nms(x[0], x[1],\n nms_threshold, keep_top_k),\n (scores, bboxes),\n dtype=(scores.dtype, bboxes.dtype),\n parallel_iterations=10,\n back_prop=False,\n swap_memory=False,\n infer_shape=True)\n scores, bboxes = r\n return scores, bboxes\n",
"import numpy as np\nimport os\nfrom sklearn.preprocessing import LabelEncoder\n\ndef get_dataset():\n \n list_folder = os.listdir('dataperson/')\n list_images = []\n for i in xrange(len(list_folder)):\n images = os.listdir('dataperson/' + list_folder[i])\n for x in xrange(len(images)):\n image = [list_folder[i] + '/' + images[x], list_folder[i]]\n list_images.append(image)\n \n list_images = np.array(list_images)\n np.random.shuffle(list_images)\n \n label = np.unique(list_images[:, 1]).tolist()\n \n list_images[:, 1] = LabelEncoder().fit_transform(list_images[:, 1])\n \n return list_images, np.unique(list_images[:, 1]).shape[0], label"
] |
[
[
"sklearn.preprocessing.LabelEncoder",
"numpy.array",
"numpy.random.shuffle",
"numpy.unique"
],
[
"tensorflow.nn.top_k",
"tensorflow.gather",
"tensorflow.name_scope",
"tensorflow.shape"
],
[
"sklearn.preprocessing.LabelEncoder",
"numpy.array",
"numpy.random.shuffle",
"numpy.unique"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
yskn67/xgboost
|
[
"70a4c419e966fea9418bc09dfec19e41215f7fb2"
] |
[
"tests/python-gpu/test_gpu_updaters.py"
] |
[
"from __future__ import print_function\n\nimport sys\n\nsys.path.append(\"../../tests/python\")\nimport xgboost as xgb\nimport numpy as np\nimport unittest\nfrom nose.plugins.attrib import attr\nfrom sklearn.datasets import load_digits, load_boston, load_breast_cancer, make_regression\nimport itertools as it\n\nrng = np.random.RandomState(1994)\n\n\ndef non_increasing(L, tolerance):\n return all((y - x) < tolerance for x, y in zip(L, L[1:]))\n\n\n# Check result is always decreasing and final accuracy is within tolerance\ndef assert_accuracy(res, tree_method, comparison_tree_method, tolerance, param):\n assert non_increasing(res[tree_method], tolerance)\n assert np.allclose(res[tree_method][-1], res[comparison_tree_method][-1], 1e-3, 1e-2)\n\n\ndef train_boston(param_in, comparison_tree_method):\n data = load_boston()\n dtrain = xgb.DMatrix(data.data, label=data.target)\n param = {}\n param.update(param_in)\n param['max_depth'] = 2\n res_tmp = {}\n res = {}\n num_rounds = 10\n bst = xgb.train(param, dtrain, num_rounds, [(dtrain, 'train')], evals_result=res_tmp)\n res[param['tree_method']] = res_tmp['train']['rmse']\n param[\"tree_method\"] = comparison_tree_method\n bst = xgb.train(param, dtrain, num_rounds, [(dtrain, 'train')], evals_result=res_tmp)\n res[comparison_tree_method] = res_tmp['train']['rmse']\n\n return res\n\n\ndef train_digits(param_in, comparison_tree_method):\n data = load_digits()\n dtrain = xgb.DMatrix(data.data, label=data.target)\n param = {}\n param['objective'] = 'multi:softmax'\n param['num_class'] = 10\n param.update(param_in)\n res_tmp = {}\n res = {}\n num_rounds = 10\n xgb.train(param, dtrain, num_rounds, [(dtrain, 'train')], evals_result=res_tmp)\n res[param['tree_method']] = res_tmp['train']['merror']\n param[\"tree_method\"] = comparison_tree_method\n xgb.train(param, dtrain, num_rounds, [(dtrain, 'train')], evals_result=res_tmp)\n res[comparison_tree_method] = res_tmp['train']['merror']\n return res\n\n\ndef train_cancer(param_in, 
comparison_tree_method):\n data = load_breast_cancer()\n dtrain = xgb.DMatrix(data.data, label=data.target)\n param = {}\n param['objective'] = 'binary:logistic'\n param.update(param_in)\n res_tmp = {}\n res = {}\n num_rounds = 10\n xgb.train(param, dtrain, num_rounds, [(dtrain, 'train')], evals_result=res_tmp)\n res[param['tree_method']] = res_tmp['train']['error']\n param[\"tree_method\"] = comparison_tree_method\n xgb.train(param, dtrain, num_rounds, [(dtrain, 'train')], evals_result=res_tmp)\n res[comparison_tree_method] = res_tmp['train']['error']\n return res\n\n\ndef train_sparse(param_in, comparison_tree_method):\n n = 5000\n sparsity = 0.75\n X, y = make_regression(n, random_state=rng)\n X = np.array([[np.nan if rng.uniform(0, 1) < sparsity else x for x in x_row] for x_row in X])\n dtrain = xgb.DMatrix(X, label=y)\n param = {}\n param.update(param_in)\n res_tmp = {}\n res = {}\n num_rounds = 10\n bst = xgb.train(param, dtrain, num_rounds, [(dtrain, 'train')], evals_result=res_tmp)\n res[param['tree_method']] = res_tmp['train']['rmse']\n param[\"tree_method\"] = comparison_tree_method\n bst = xgb.train(param, dtrain, num_rounds, [(dtrain, 'train')], evals_result=res_tmp)\n res[comparison_tree_method] = res_tmp['train']['rmse']\n return res\n\n\n# Enumerates all permutations of variable parameters\ndef assert_updater_accuracy(tree_method, comparison_tree_method, variable_param, tolerance):\n param = {'tree_method': tree_method }\n names = sorted(variable_param)\n combinations = it.product(*(variable_param[Name] for Name in names))\n\n for set in combinations:\n print(names, file=sys.stderr)\n print(set, file=sys.stderr)\n param_tmp = param.copy()\n for i, name in enumerate(names):\n param_tmp[name] = set[i]\n\n print(param_tmp, file=sys.stderr)\n assert_accuracy(train_boston(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance, param_tmp)\n assert_accuracy(train_digits(param_tmp, comparison_tree_method), tree_method, 
comparison_tree_method, tolerance, param_tmp)\n assert_accuracy(train_cancer(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance, param_tmp)\n assert_accuracy(train_sparse(param_tmp, comparison_tree_method), tree_method, comparison_tree_method, tolerance, param_tmp)\n\n\n@attr('gpu')\nclass TestGPU(unittest.TestCase):\n def test_gpu_hist(self):\n variable_param = {'max_depth': [2, 6, 11], 'max_bin': [2, 16, 1024], 'n_gpus': [1, -1]}\n assert_updater_accuracy('gpu_hist', 'hist', variable_param, 0.02)\n\n def test_gpu_exact(self):\n variable_param = {'max_depth': [2, 6, 15]}\n assert_updater_accuracy('gpu_exact', 'exact', variable_param, 0.02)\n\n def test_gpu_hist_experimental(self):\n variable_param = {'n_gpus': [1, -1], 'max_depth': [2, 6], 'max_leaves': [255, 4], 'max_bin': [2, 16, 1024]}\n assert_updater_accuracy('gpu_hist_experimental', 'hist', variable_param, 0.01)\n"
] |
[
[
"numpy.allclose",
"sklearn.datasets.load_breast_cancer",
"sklearn.datasets.make_regression",
"sklearn.datasets.load_digits",
"sklearn.datasets.load_boston",
"numpy.random.RandomState"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
incredelous/bert_reader
|
[
"d7a54fce83a5678777a02bc50176e7fa527d7f9f"
] |
[
"run_forum_data.py"
] |
[
"# coding=utf-8\n# Copyright 2018 The Google AI Language Team Authors.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Run jiankangyun forum data to extract needed answer\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport collections\nimport json\nimport math\nimport os\nimport random\nimport modeling\nimport optimization\nimport tokenization\nimport six\nimport tensorflow as tf\nfrom tqdm import tqdm\nimport re\n\nflags = tf.flags\n\nFLAGS = flags.FLAGS\n\n## Required parameters\nflags.DEFINE_string(\n \"bert_config_file\", None,\n \"The config json file corresponding to the pre-trained BERT model. \"\n \"This specifies the model architecture.\")\n\nflags.DEFINE_string(\"vocab_file\", None,\n \"The vocabulary file that the BERT model was trained on.\")\n\nflags.DEFINE_string(\n \"output_dir\", None,\n \"The output directory where the model checkpoints will be written.\")\n\n## Other parameters\nflags.DEFINE_string(\"train_file\", None,\n \"SQuAD json for training. E.g., train-v1.1.json\")\n\nflags.DEFINE_string(\n \"predict_file\", None,\n \"SQuAD json for predictions. E.g., dev-v1.1.json or test-v1.1.json\")\n\nflags.DEFINE_string(\n \"init_checkpoint\", None,\n \"Initial checkpoint (usually from a pre-trained BERT model).\")\n\nflags.DEFINE_bool(\n \"do_lower_case\", True,\n \"Whether to lower case the input text. 
Should be True for uncased \"\n \"models and False for cased models.\")\n\nflags.DEFINE_integer(\n \"max_seq_length\", 384,\n \"The maximum total input sequence length after WordPiece tokenization. \"\n \"Sequences longer than this will be truncated, and sequences shorter \"\n \"than this will be padded.\")\n\nflags.DEFINE_integer(\n \"doc_stride\", 128,\n \"When splitting up a long document into chunks, how much stride to \"\n \"take between chunks.\")\n\nflags.DEFINE_integer(\n \"max_query_length\", 64,\n \"The maximum number of tokens for the question. Questions longer than \"\n \"this will be truncated to this length.\")\n\nflags.DEFINE_bool(\"do_train\", False, \"Whether to run training.\")\n\nflags.DEFINE_bool(\"do_predict\", False, \"Whether to run eval on the dev set.\")\n\nflags.DEFINE_integer(\"train_batch_size\", 32, \"Total batch size for training.\")\n\nflags.DEFINE_integer(\"predict_batch_size\", 8,\n \"Total batch size for predictions.\")\n\nflags.DEFINE_float(\"learning_rate\", 5e-5, \"The initial learning rate for Adam.\")\n\nflags.DEFINE_float(\"num_train_epochs\", 3.0,\n \"Total number of training epochs to perform.\")\n\nflags.DEFINE_float(\n \"warmup_proportion\", 0.1,\n \"Proportion of training to perform linear learning rate warmup for. \"\n \"E.g., 0.1 = 10% of training.\")\n\nflags.DEFINE_integer(\"save_checkpoints_steps\", 1000,\n \"How often to save the model checkpoint.\")\n\nflags.DEFINE_integer(\"iterations_per_loop\", 1000,\n \"How many steps to make in each estimator call.\")\n\nflags.DEFINE_integer(\n \"n_best_size\", 20,\n \"The total number of n-best predictions to generate in the \"\n \"nbest_predictions.json output file.\")\n\nflags.DEFINE_integer(\n \"max_answer_length\", 30,\n \"The maximum length of an answer that can be generated. 
This is needed \"\n \"because the start and end predictions are not conditioned on one another.\")\n\nflags.DEFINE_bool(\"use_tpu\", False, \"Whether to use TPU or GPU/CPU.\")\n\ntf.flags.DEFINE_string(\n \"tpu_name\", None,\n \"The Cloud TPU to use for training. This should be either the name \"\n \"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 \"\n \"url.\")\n\ntf.flags.DEFINE_string(\n \"tpu_zone\", None,\n \"[Optional] GCE zone where the Cloud TPU is located in. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\n \"gcp_project\", None,\n \"[Optional] Project name for the Cloud TPU-enabled project. If not \"\n \"specified, we will attempt to automatically detect the GCE project from \"\n \"metadata.\")\n\ntf.flags.DEFINE_string(\"master\", None, \"[Optional] TensorFlow master URL.\")\n\nflags.DEFINE_integer(\n \"num_tpu_cores\", 8,\n \"Only used if `use_tpu` is True. Total number of TPU cores to use.\")\n\nflags.DEFINE_bool(\n \"verbose_logging\", False,\n \"If true, all of the warnings related to data processing will be printed. 
\"\n \"A number of warnings are expected for a normal SQuAD evaluation.\")\n\nflags.DEFINE_bool(\n \"version_2_with_negative\", False,\n \"If true, the SQuAD examples contain some that do not have an answer.\")\n\nflags.DEFINE_float(\n \"null_score_diff_threshold\", 0.0,\n \"If null_score - best_non_null is greater than the threshold predict null.\")\n\n\nclass ForumExample(object):\n \"\"\"A single training/test example for simple sequence classification.\n\n For examples without an answer, the start and end position are -1.\n \"\"\"\n\n def __init__(self,\n qas_id,\n question_text,\n doc_tokens,\n orig_answer_text=None,\n start_position=None,\n end_position=None,\n is_impossible=False):\n self.qas_id = qas_id\n self.question_text = question_text\n self.doc_tokens = doc_tokens\n self.orig_answer_text = orig_answer_text\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n def __str__(self):\n return self.__repr__()\n\n def __repr__(self):\n s = \"\"\n s += \"qas_id: %s\" % (tokenization.printable_text(self.qas_id))\n s += \", question_text: %s\" % (\n tokenization.printable_text(self.question_text))\n if self.start_position:\n s += \", start_position: %d\" % (self.start_position)\n if self.start_position:\n s += \", end_position: %d\" % (self.end_position)\n if self.start_position:\n s += \", is_impossible: %r\" % (self.is_impossible)\n return s\n\n\nclass InputFeatures(object):\n \"\"\"A single set of features of data.\"\"\"\n\n def __init__(self,\n unique_id,\n example_index,\n doc_span_index,\n tokens,\n token_to_orig_map,\n token_is_max_context,\n input_ids,\n input_mask,\n segment_ids,\n start_position=None,\n end_position=None,\n is_impossible=None):\n self.unique_id = unique_id\n self.example_index = example_index\n self.doc_span_index = doc_span_index\n self.tokens = tokens\n self.token_to_orig_map = token_to_orig_map\n self.token_is_max_context = token_is_max_context\n self.input_ids = 
input_ids\n self.input_mask = input_mask\n self.segment_ids = segment_ids\n self.start_position = start_position\n self.end_position = end_position\n self.is_impossible = is_impossible\n\n\ndef read_forum_data(input_file, is_training):\n examples = []\n\n def is_whitespace(c):\n if c == \" \" or c == \"\\t\" or c == \"\\r\" or c == \"\\n\" or ord(c) == 0x202F:\n return True\n return False\n\n with open(FLAGS.train_file, 'r', encoding='utf-8') as f:\n input_data = json.loads(f.read())\n tf.logging.info('read {} forum data'.format('training' if is_training else 'dev'))\n for _, forum_id in enumerate(tqdm(input_data.keys())):\n question_text = input_data[forum_id]['title']\n correct_id = input_data[forum_id]['Correct_ID']\n answer = input_data[forum_id]['Ans'][correct_id]\n if len(answer) > FLAGS.doc_stride:\n tf.logging.warning('answer is too long %s' % answer)\n continue\n orig_answer_text = \"\".join(input_data[forum_id]['Ans'])\n doc_tokens = []\n char_to_word_offset = []\n prev_is_whitespace = True\n\n for c in orig_answer_text:\n if is_whitespace(c):\n prev_is_whitespace = True\n else:\n if prev_is_whitespace:\n doc_tokens.append(c)\n else:\n doc_tokens[-1] += c\n prev_is_whitespace = False\n char_to_word_offset.append(len(doc_tokens) - 1) # 对应中文字符在doc tokens中哪一段,以空格,Tab,换行为分段符\n regex = re.search(re.escape(answer), orig_answer_text)\n if regex:\n answer_offset, _ = regex.span()\n else:\n tf.logging.warning('can not find answer from orig_answer_text: %s vs %s' % (answer, orig_answer_text))\n continue\n answer_length = len(answer)\n start_position = char_to_word_offset[answer_offset]\n end_position = char_to_word_offset[answer_offset + answer_length - 1]\n example = ForumExample(\n qas_id=forum_id,\n question_text=question_text,\n doc_tokens=doc_tokens,\n orig_answer_text=answer,\n start_position=start_position,\n end_position=end_position)\n examples.append(example)\n return examples\n\n\ndef convert_examples_to_features(examples, tokenizer, max_seq_length,\n 
doc_stride, max_query_length, is_training,\n output_fn):\n \"\"\"Loads a data file into a list of `InputBatch`s.\"\"\"\n\n unique_id = 1000000000\n tf.logging.info('convert {} examples to features'.format('training' if is_training else 'dev')) \n for (example_index, example) in enumerate(tqdm(examples)):\n query_tokens = tokenizer.tokenize(example.question_text)\n\n if len(query_tokens) > max_query_length:\n query_tokens = query_tokens[0:max_query_length]\n\n tok_to_orig_index = []\n orig_to_tok_index = []\n all_doc_tokens = []\n for (i, token) in enumerate(example.doc_tokens):\n orig_to_tok_index.append(len(all_doc_tokens))\n sub_tokens = tokenizer.tokenize(token)\n for sub_token in sub_tokens:\n tok_to_orig_index.append(i)\n all_doc_tokens.append(sub_token)\n\n tok_start_position = None\n tok_end_position = None\n if is_training and example.is_impossible:\n tok_start_position = -1\n tok_end_position = -1\n if is_training and not example.is_impossible:\n tok_start_position = orig_to_tok_index[example.start_position]\n if example.end_position < len(example.doc_tokens) - 1:\n tok_end_position = orig_to_tok_index[example.end_position + 1] - 1\n else:\n tok_end_position = len(all_doc_tokens) - 1\n (tok_start_position, tok_end_position) = _improve_answer_span(\n all_doc_tokens, tok_start_position, tok_end_position, tokenizer,\n example.orig_answer_text)\n\n # The -3 accounts for [CLS], [SEP] and [SEP]\n max_tokens_for_doc = max_seq_length - len(query_tokens) - 3\n\n # We can have documents that are longer than the maximum sequence length.\n # To deal with this we do a sliding window approach, where we take chunks\n # of the up to our max length with a stride of `doc_stride`.\n _DocSpan = collections.namedtuple( # pylint: disable=invalid-name\n \"DocSpan\", [\"start\", \"length\"])\n doc_spans = []\n start_offset = 0\n while start_offset < len(all_doc_tokens):\n length = len(all_doc_tokens) - start_offset\n if length > max_tokens_for_doc:\n length = max_tokens_for_doc\n 
doc_spans.append(_DocSpan(start=start_offset, length=length))\n if start_offset + length == len(all_doc_tokens):\n break\n start_offset += min(length, doc_stride)\n\n for (doc_span_index, doc_span) in enumerate(doc_spans):\n tokens = []\n token_to_orig_map = {}\n token_is_max_context = {}\n segment_ids = []\n tokens.append(\"[CLS]\")\n segment_ids.append(0)\n for token in query_tokens:\n tokens.append(token)\n segment_ids.append(0)\n tokens.append(\"[SEP]\")\n segment_ids.append(0)\n\n for i in range(doc_span.length):\n split_token_index = doc_span.start + i\n token_to_orig_map[len(tokens)] = tok_to_orig_index[split_token_index]\n\n is_max_context = _check_is_max_context(doc_spans, doc_span_index,\n split_token_index)\n token_is_max_context[len(tokens)] = is_max_context\n tokens.append(all_doc_tokens[split_token_index])\n segment_ids.append(1)\n tokens.append(\"[SEP]\")\n segment_ids.append(1)\n\n input_ids = tokenizer.convert_tokens_to_ids(tokens)\n\n # The mask has 1 for real tokens and 0 for padding tokens. 
Only real\n # tokens are attended to.\n input_mask = [1] * len(input_ids)\n\n # Zero-pad up to the sequence length.\n while len(input_ids) < max_seq_length:\n input_ids.append(0)\n input_mask.append(0)\n segment_ids.append(0)\n\n assert len(input_ids) == max_seq_length\n assert len(input_mask) == max_seq_length\n assert len(segment_ids) == max_seq_length\n\n start_position = None\n end_position = None\n if is_training and not example.is_impossible:\n # For training, if our document chunk does not contain an annotation\n # we throw it out, since there is nothing to predict.\n doc_start = doc_span.start\n doc_end = doc_span.start + doc_span.length - 1\n out_of_span = False\n if not (tok_start_position >= doc_start and\n tok_end_position <= doc_end):\n out_of_span = True\n if out_of_span:\n start_position = 0\n end_position = 0\n else:\n doc_offset = len(query_tokens) + 2\n start_position = tok_start_position - doc_start + doc_offset\n end_position = tok_end_position - doc_start + doc_offset\n\n if is_training and example.is_impossible:\n start_position = 0\n end_position = 0\n\n if example_index < 20:\n tf.logging.info(\"*** Example ***\")\n tf.logging.info(\"unique_id: %s\" % (unique_id))\n tf.logging.info(\"example_index: %s\" % (example_index))\n tf.logging.info(\"doc_span_index: %s\" % (doc_span_index))\n tf.logging.info(\"tokens: %s\" % \" \".join(\n [tokenization.printable_text(x) for x in tokens]))\n tf.logging.info(\"token_to_orig_map: %s\" % \" \".join(\n [\"%d:%d\" % (x, y) for (x, y) in six.iteritems(token_to_orig_map)]))\n tf.logging.info(\"token_is_max_context: %s\" % \" \".join([\n \"%d:%s\" % (x, y) for (x, y) in six.iteritems(token_is_max_context)\n ]))\n tf.logging.info(\"input_ids: %s\" % \" \".join([str(x) for x in input_ids]))\n tf.logging.info(\n \"input_mask: %s\" % \" \".join([str(x) for x in input_mask]))\n tf.logging.info(\n \"segment_ids: %s\" % \" \".join([str(x) for x in segment_ids]))\n if is_training and example.is_impossible:\n 
tf.logging.info(\"impossible example\")\n if is_training and not example.is_impossible:\n answer_text = \" \".join(tokens[start_position:(end_position + 1)])\n tf.logging.info(\"start_position: %d\" % (start_position))\n tf.logging.info(\"end_position: %d\" % (end_position))\n tf.logging.info(\n \"answer: %s\" % (tokenization.printable_text(answer_text)))\n\n feature = InputFeatures(\n unique_id=unique_id,\n example_index=example_index,\n doc_span_index=doc_span_index,\n tokens=tokens,\n token_to_orig_map=token_to_orig_map,\n token_is_max_context=token_is_max_context,\n input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n start_position=start_position,\n end_position=end_position,\n is_impossible=example.is_impossible)\n\n # Run callback\n output_fn(feature)\n\n unique_id += 1\n\n\ndef _improve_answer_span(doc_tokens, input_start, input_end, tokenizer,\n orig_answer_text):\n \"\"\"Returns tokenized answer spans that better match the annotated answer.\"\"\"\n\n # The SQuAD annotations are character based. We first project them to\n # whitespace-tokenized words. But then after WordPiece tokenization, we can\n # often find a \"better match\". For example:\n #\n # Question: What year was John Smith born?\n # Context: The leader was John Smith (1895-1943).\n # Answer: 1895\n #\n # The original whitespace-tokenized answer will be \"(1895-1943).\". However\n # after tokenization, our tokens will be \"( 1895 - 1943 ) .\". So we can match\n # the exact answer, 1895.\n #\n # However, this is not always possible. Consider the following:\n #\n # Question: What country is the top exporter of electornics?\n # Context: The Japanese electronics industry is the lagest in the world.\n # Answer: Japan\n #\n # In this case, the annotator chose \"Japan\" as a character sub-span of\n # the word \"Japanese\". Since our WordPiece tokenizer does not split\n # \"Japanese\", we just use \"Japanese\" as the annotation. 
This is fairly rare\n # in SQuAD, but does happen.\n tok_answer_text = \" \".join(tokenizer.tokenize(orig_answer_text))\n\n for new_start in range(input_start, input_end + 1):\n for new_end in range(input_end, new_start - 1, -1):\n text_span = \" \".join(doc_tokens[new_start:(new_end + 1)])\n if text_span == tok_answer_text:\n return (new_start, new_end)\n\n return (input_start, input_end)\n\n\ndef _check_is_max_context(doc_spans, cur_span_index, position):\n \"\"\"Check if this is the 'max context' doc span for the token.\"\"\"\n\n # Because of the sliding window approach taken to scoring documents, a single\n # token can appear in multiple documents. E.g.\n # Doc: the man went to the store and bought a gallon of milk\n # Span A: the man went to the\n # Span B: to the store and bought\n # Span C: and bought a gallon of\n # ...\n #\n # Now the word 'bought' will have two scores from spans B and C. We only\n # want to consider the score with \"maximum context\", which we define as\n # the *minimum* of its left and right context (the *sum* of left and\n # right context will always be the same, of course).\n #\n # In the example the maximum context for 'bought' would be span C since\n # it has 1 left context and 3 right context, while span B has 4 left context\n # and 0 right context.\n best_score = None\n best_span_index = None\n for (span_index, doc_span) in enumerate(doc_spans):\n end = doc_span.start + doc_span.length - 1\n if position < doc_span.start:\n continue\n if position > end:\n continue\n num_left_context = position - doc_span.start\n num_right_context = end - position\n score = min(num_left_context, num_right_context) + 0.01 * doc_span.length\n if best_score is None or score > best_score:\n best_score = score\n best_span_index = span_index\n\n return cur_span_index == best_span_index\n\n\ndef create_model(bert_config, is_training, input_ids, input_mask, segment_ids,\n use_one_hot_embeddings):\n \"\"\"Creates a classification model.\"\"\"\n model = 
modeling.BertModel(\n config=bert_config,\n is_training=is_training,\n input_ids=input_ids,\n input_mask=input_mask,\n token_type_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n final_hidden = model.get_sequence_output()\n\n final_hidden_shape = modeling.get_shape_list(final_hidden, expected_rank=3)\n batch_size = final_hidden_shape[0]\n seq_length = final_hidden_shape[1]\n hidden_size = final_hidden_shape[2]\n\n output_weights = tf.get_variable(\n \"cls/squad/output_weights\", [2, hidden_size],\n initializer=tf.truncated_normal_initializer(stddev=0.02))\n\n output_bias = tf.get_variable(\n \"cls/squad/output_bias\", [2], initializer=tf.zeros_initializer())\n\n final_hidden_matrix = tf.reshape(final_hidden,\n [batch_size * seq_length, hidden_size])\n logits = tf.matmul(final_hidden_matrix, output_weights, transpose_b=True)\n logits = tf.nn.bias_add(logits, output_bias)\n\n logits = tf.reshape(logits, [batch_size, seq_length, 2])\n logits = tf.transpose(logits, [2, 0, 1])\n\n unstacked_logits = tf.unstack(logits, axis=0)\n\n (start_logits, end_logits) = (unstacked_logits[0], unstacked_logits[1])\n\n return (start_logits, end_logits)\n\n\ndef model_fn_builder(bert_config, init_checkpoint, learning_rate,\n num_train_steps, num_warmup_steps, use_tpu,\n use_one_hot_embeddings):\n \"\"\"Returns `model_fn` closure for TPUEstimator.\"\"\"\n\n def model_fn(features, labels, mode, params): # pylint: disable=unused-argument\n \"\"\"The `model_fn` for TPUEstimator.\"\"\"\n\n tf.logging.info(\"*** Features ***\")\n for name in sorted(features.keys()):\n tf.logging.info(\" name = %s, shape = %s\" % (name, features[name].shape))\n\n unique_ids = features[\"unique_ids\"]\n input_ids = features[\"input_ids\"]\n input_mask = features[\"input_mask\"]\n segment_ids = features[\"segment_ids\"]\n\n is_training = (mode == tf.estimator.ModeKeys.TRAIN)\n\n (start_logits, end_logits) = create_model(\n bert_config=bert_config,\n is_training=is_training,\n 
input_ids=input_ids,\n input_mask=input_mask,\n segment_ids=segment_ids,\n use_one_hot_embeddings=use_one_hot_embeddings)\n\n tvars = tf.trainable_variables()\n\n initialized_variable_names = {}\n scaffold_fn = None\n if init_checkpoint:\n (assignment_map, initialized_variable_names\n ) = modeling.get_assignment_map_from_checkpoint(tvars, init_checkpoint)\n if use_tpu:\n\n def tpu_scaffold():\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n return tf.train.Scaffold()\n\n scaffold_fn = tpu_scaffold\n else:\n tf.train.init_from_checkpoint(init_checkpoint, assignment_map)\n\n tf.logging.info(\"**** Trainable Variables ****\")\n for var in tvars:\n init_string = \"\"\n if var.name in initialized_variable_names:\n init_string = \", *INIT_FROM_CKPT*\"\n tf.logging.info(\" name = %s, shape = %s%s\", var.name, var.shape,\n init_string)\n\n output_spec = None\n if mode == tf.estimator.ModeKeys.TRAIN:\n seq_length = modeling.get_shape_list(input_ids)[1]\n\n def compute_loss(logits, positions):\n one_hot_positions = tf.one_hot(\n positions, depth=seq_length, dtype=tf.float32)\n log_probs = tf.nn.log_softmax(logits, axis=-1)\n loss = -tf.reduce_mean(\n tf.reduce_sum(one_hot_positions * log_probs, axis=-1))\n return loss\n\n start_positions = features[\"start_positions\"]\n end_positions = features[\"end_positions\"]\n\n start_loss = compute_loss(start_logits, start_positions)\n end_loss = compute_loss(end_logits, end_positions)\n\n total_loss = (start_loss + end_loss) / 2.0\n\n train_op = optimization.create_optimizer(\n total_loss, learning_rate, num_train_steps, num_warmup_steps, use_tpu)\n\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode,\n loss=total_loss,\n train_op=train_op,\n scaffold_fn=scaffold_fn)\n elif mode == tf.estimator.ModeKeys.PREDICT:\n predictions = {\n \"unique_ids\": unique_ids,\n \"start_logits\": start_logits,\n \"end_logits\": end_logits,\n }\n output_spec = tf.contrib.tpu.TPUEstimatorSpec(\n mode=mode, 
predictions=predictions, scaffold_fn=scaffold_fn)\n else:\n raise ValueError(\n \"Only TRAIN and PREDICT modes are supported: %s\" % (mode))\n\n return output_spec\n\n return model_fn\n\n\ndef input_fn_builder(input_file, seq_length, is_training, drop_remainder):\n \"\"\"Creates an `input_fn` closure to be passed to TPUEstimator.\"\"\"\n\n name_to_features = {\n \"unique_ids\": tf.FixedLenFeature([], tf.int64),\n \"input_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n \"input_mask\": tf.FixedLenFeature([seq_length], tf.int64),\n \"segment_ids\": tf.FixedLenFeature([seq_length], tf.int64),\n }\n\n if is_training:\n name_to_features[\"start_positions\"] = tf.FixedLenFeature([], tf.int64)\n name_to_features[\"end_positions\"] = tf.FixedLenFeature([], tf.int64)\n\n def _decode_record(record, name_to_features):\n \"\"\"Decodes a record to a TensorFlow example.\"\"\"\n example = tf.parse_single_example(record, name_to_features)\n\n # tf.Example only supports tf.int64, but the TPU only supports tf.int32.\n # So cast all int64 to int32.\n for name in list(example.keys()):\n t = example[name]\n if t.dtype == tf.int64:\n t = tf.to_int32(t)\n example[name] = t\n\n return example\n\n def input_fn(params):\n \"\"\"The actual input function.\"\"\"\n batch_size = params[\"batch_size\"]\n\n # For training, we want a lot of parallel reading and shuffling.\n # For eval, we want no shuffling and parallel reading doesn't matter.\n d = tf.data.TFRecordDataset(input_file)\n if is_training:\n d = d.repeat()\n d = d.shuffle(buffer_size=100)\n\n d = d.apply(\n tf.contrib.data.map_and_batch(\n lambda record: _decode_record(record, name_to_features),\n batch_size=batch_size,\n drop_remainder=drop_remainder))\n\n return d\n\n return input_fn\n\n\nRawResult = collections.namedtuple(\"RawResult\",\n [\"unique_id\", \"start_logits\", \"end_logits\"])\n\n\ndef write_predictions(all_examples, all_features, all_results, n_best_size,\n max_answer_length, do_lower_case, output_prediction_file,\n 
output_nbest_file, output_null_log_odds_file):\n \"\"\"Write final predictions to the json file and log-odds of null if needed.\"\"\"\n tf.logging.info(\"Writing predictions to: %s\" % (output_prediction_file))\n tf.logging.info(\"Writing nbest to: %s\" % (output_nbest_file))\n\n example_index_to_features = collections.defaultdict(list)\n for feature in all_features:\n example_index_to_features[feature.example_index].append(feature)\n\n unique_id_to_result = {}\n for result in all_results:\n unique_id_to_result[result.unique_id] = result\n\n _PrelimPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"PrelimPrediction\",\n [\"feature_index\", \"start_index\", \"end_index\", \"start_logit\", \"end_logit\"])\n\n all_predictions = collections.OrderedDict()\n all_nbest_json = collections.OrderedDict()\n scores_diff_json = collections.OrderedDict()\n\n for (example_index, example) in enumerate(tqdm(all_examples)):\n features = example_index_to_features[example_index]\n\n prelim_predictions = []\n # keep track of the minimum score of null start+end of position 0\n score_null = 1000000 # large and positive\n min_null_feature_index = 0 # the paragraph slice with min mull score\n null_start_logit = 0 # the start logit at the slice with min null score\n null_end_logit = 0 # the end logit at the slice with min null score\n for (feature_index, feature) in enumerate(features):\n result = unique_id_to_result[feature.unique_id]\n start_indexes = _get_best_indexes(result.start_logits, n_best_size)\n end_indexes = _get_best_indexes(result.end_logits, n_best_size)\n # if we could have irrelevant answers, get the min score of irrelevant\n if FLAGS.version_2_with_negative:\n feature_null_score = result.start_logits[0] + result.end_logits[0]\n if feature_null_score < score_null:\n score_null = feature_null_score\n min_null_feature_index = feature_index\n null_start_logit = result.start_logits[0]\n null_end_logit = result.end_logits[0]\n for start_index in 
start_indexes:\n for end_index in end_indexes:\n # We could hypothetically create invalid predictions, e.g., predict\n # that the start of the span is in the question. We throw out all\n # invalid predictions.\n if start_index >= len(feature.tokens):\n continue\n if end_index >= len(feature.tokens):\n continue\n if start_index not in feature.token_to_orig_map:\n continue\n if end_index not in feature.token_to_orig_map:\n continue\n if not feature.token_is_max_context.get(start_index, False):\n continue\n if end_index < start_index:\n continue\n length = end_index - start_index + 1\n if length > max_answer_length:\n continue\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=feature_index,\n start_index=start_index,\n end_index=end_index,\n start_logit=result.start_logits[start_index],\n end_logit=result.end_logits[end_index]))\n\n if FLAGS.version_2_with_negative:\n prelim_predictions.append(\n _PrelimPrediction(\n feature_index=min_null_feature_index,\n start_index=0,\n end_index=0,\n start_logit=null_start_logit,\n end_logit=null_end_logit))\n prelim_predictions = sorted(\n prelim_predictions,\n key=lambda x: (x.start_logit + x.end_logit),\n reverse=True)\n\n _NbestPrediction = collections.namedtuple( # pylint: disable=invalid-name\n \"NbestPrediction\", [\"text\", \"start_logit\", \"end_logit\"])\n\n seen_predictions = {}\n nbest = []\n for pred in prelim_predictions:\n if len(nbest) >= n_best_size:\n break\n feature = features[pred.feature_index]\n if pred.start_index > 0: # this is a non-null prediction\n tok_tokens = feature.tokens[pred.start_index:(pred.end_index + 1)]\n orig_doc_start = feature.token_to_orig_map[pred.start_index]\n orig_doc_end = feature.token_to_orig_map[pred.end_index]\n orig_tokens = example.doc_tokens[orig_doc_start:(orig_doc_end + 1)]\n tok_text = \" \".join(tok_tokens)\n\n # De-tokenize WordPieces that have been split off.\n tok_text = tok_text.replace(\" ##\", \"\")\n tok_text = tok_text.replace(\"##\", \"\")\n\n # 
Clean whitespace\n tok_text = tok_text.strip()\n tok_text = \" \".join(tok_text.split())\n orig_text = \" \".join(orig_tokens)\n\n final_text = get_final_text(tok_text, orig_text, do_lower_case)\n if final_text in seen_predictions:\n continue\n\n seen_predictions[final_text] = True\n else:\n final_text = \"\"\n seen_predictions[final_text] = True\n\n nbest.append(\n _NbestPrediction(\n text=final_text,\n start_logit=pred.start_logit,\n end_logit=pred.end_logit))\n\n # if we didn't inlude the empty option in the n-best, inlcude it\n if FLAGS.version_2_with_negative:\n if \"\" not in seen_predictions:\n nbest.append(\n _NbestPrediction(\n text=\"\", start_logit=null_start_logit,\n end_logit=null_end_logit))\n # In very rare edge cases we could have no valid predictions. So we\n # just create a nonce prediction in this case to avoid failure.\n if not nbest:\n nbest.append(\n _NbestPrediction(text=\"empty\", start_logit=0.0, end_logit=0.0))\n\n assert len(nbest) >= 1\n\n total_scores = []\n best_non_null_entry = None\n for entry in nbest:\n total_scores.append(entry.start_logit + entry.end_logit)\n if not best_non_null_entry:\n if entry.text:\n best_non_null_entry = entry\n\n probs = _compute_softmax(total_scores)\n\n nbest_json = []\n for (i, entry) in enumerate(nbest):\n output = collections.OrderedDict()\n output[\"text\"] = entry.text\n output[\"probability\"] = probs[i]\n output[\"start_logit\"] = entry.start_logit\n output[\"end_logit\"] = entry.end_logit\n nbest_json.append(output)\n\n assert len(nbest_json) >= 1\n\n if not FLAGS.version_2_with_negative:\n all_predictions[example.qas_id] = nbest_json[0][\"text\"]\n else:\n # predict \"\" iff the null score - the score of best non-null > threshold\n score_diff = score_null - best_non_null_entry.start_logit - (\n best_non_null_entry.end_logit)\n scores_diff_json[example.qas_id] = score_diff\n if score_diff > FLAGS.null_score_diff_threshold:\n all_predictions[example.qas_id] = \"\"\n else:\n 
all_predictions[example.qas_id] = best_non_null_entry.text\n\n all_nbest_json[example.qas_id] = nbest_json\n\n with tf.gfile.GFile(output_prediction_file, \"w\") as writer:\n writer.write(json.dumps(all_predictions, indent=4) + \"\\n\")\n\n with tf.gfile.GFile(output_nbest_file, \"w\") as writer:\n writer.write(json.dumps(all_nbest_json, indent=4) + \"\\n\")\n\n if FLAGS.version_2_with_negative:\n with tf.gfile.GFile(output_null_log_odds_file, \"w\") as writer:\n writer.write(json.dumps(scores_diff_json, indent=4) + \"\\n\")\n\n\ndef get_final_text(pred_text, orig_text, do_lower_case):\n \"\"\"Project the tokenized prediction back to the original text.\"\"\"\n\n # When we created the data, we kept track of the alignment between original\n # (whitespace tokenized) tokens and our WordPiece tokenized tokens. So\n # now `orig_text` contains the span of our original text corresponding to the\n # span that we predicted.\n #\n # However, `orig_text` may contain extra characters that we don't want in\n # our prediction.\n #\n # For example, let's say:\n # pred_text = steve smith\n # orig_text = Steve Smith's\n #\n # We don't want to return `orig_text` because it contains the extra \"'s\".\n #\n # We don't want to return `pred_text` because it's already been normalized\n # (the SQuAD eval script also does punctuation stripping/lower casing but\n # our tokenizer does additional normalization like stripping accent\n # characters).\n #\n # What we really want to return is \"Steve Smith\".\n #\n # Therefore, we have to apply a semi-complicated alignment heruistic between\n # `pred_text` and `orig_text` to get a character-to-charcter alignment. 
This\n # can fail in certain cases in which case we just return `orig_text`.\n\n def _strip_spaces(text):\n ns_chars = []\n ns_to_s_map = collections.OrderedDict()\n for (i, c) in enumerate(text):\n if c == \" \":\n continue\n ns_to_s_map[len(ns_chars)] = i\n ns_chars.append(c)\n ns_text = \"\".join(ns_chars)\n return (ns_text, ns_to_s_map)\n\n # We first tokenize `orig_text`, strip whitespace from the result\n # and `pred_text`, and check if they are the same length. If they are\n # NOT the same length, the heuristic has failed. If they are the same\n # length, we assume the characters are one-to-one aligned.\n tokenizer = tokenization.BasicTokenizer(do_lower_case=do_lower_case)\n\n tok_text = \" \".join(tokenizer.tokenize(orig_text))\n\n start_position = tok_text.find(pred_text)\n if start_position == -1:\n if FLAGS.verbose_logging:\n tf.logging.info(\n \"Unable to find text: '%s' in '%s'\" % (pred_text, orig_text))\n return orig_text\n end_position = start_position + len(pred_text) - 1\n\n (orig_ns_text, orig_ns_to_s_map) = _strip_spaces(orig_text)\n (tok_ns_text, tok_ns_to_s_map) = _strip_spaces(tok_text)\n\n if len(orig_ns_text) != len(tok_ns_text):\n if FLAGS.verbose_logging:\n tf.logging.info(\"Length not equal after stripping spaces: '%s' vs '%s'\",\n orig_ns_text, tok_ns_text)\n return orig_text\n\n # We then project the characters in `pred_text` back to `orig_text` using\n # the character-to-character alignment.\n tok_s_to_ns_map = {}\n for (i, tok_index) in six.iteritems(tok_ns_to_s_map):\n tok_s_to_ns_map[tok_index] = i\n\n orig_start_position = None\n if start_position in tok_s_to_ns_map:\n ns_start_position = tok_s_to_ns_map[start_position]\n if ns_start_position in orig_ns_to_s_map:\n orig_start_position = orig_ns_to_s_map[ns_start_position]\n\n if orig_start_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map start position\")\n return orig_text\n\n orig_end_position = None\n if end_position in tok_s_to_ns_map:\n 
ns_end_position = tok_s_to_ns_map[end_position]\n if ns_end_position in orig_ns_to_s_map:\n orig_end_position = orig_ns_to_s_map[ns_end_position]\n\n if orig_end_position is None:\n if FLAGS.verbose_logging:\n tf.logging.info(\"Couldn't map end position\")\n return orig_text\n\n output_text = orig_text[orig_start_position:(orig_end_position + 1)]\n return output_text\n\n\ndef _get_best_indexes(logits, n_best_size):\n \"\"\"Get the n-best logits from a list.\"\"\"\n index_and_score = sorted(enumerate(logits), key=lambda x: x[1], reverse=True)\n\n best_indexes = []\n for i in range(len(index_and_score)):\n if i >= n_best_size:\n break\n best_indexes.append(index_and_score[i][0])\n return best_indexes\n\n\ndef _compute_softmax(scores):\n \"\"\"Compute softmax probability over raw logits.\"\"\"\n if not scores:\n return []\n\n max_score = None\n for score in scores:\n if max_score is None or score > max_score:\n max_score = score\n\n exp_scores = []\n total_sum = 0.0\n for score in scores:\n x = math.exp(score - max_score)\n exp_scores.append(x)\n total_sum += x\n\n probs = []\n for score in exp_scores:\n probs.append(score / total_sum)\n return probs\n\n\nclass FeatureWriter(object):\n \"\"\"Writes InputFeature to TF example file.\"\"\"\n\n def __init__(self, filename, is_training):\n self.filename = filename\n self.is_training = is_training\n self.num_features = 0\n self._writer = tf.python_io.TFRecordWriter(filename)\n\n def process_feature(self, feature):\n \"\"\"Write a InputFeature to the TFRecordWriter as a tf.train.Example.\"\"\"\n self.num_features += 1\n\n def create_int_feature(values):\n feature = tf.train.Feature(\n int64_list=tf.train.Int64List(value=list(values)))\n return feature\n\n features = collections.OrderedDict()\n features[\"unique_ids\"] = create_int_feature([feature.unique_id])\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = 
create_int_feature(feature.segment_ids)\n\n if self.is_training:\n features[\"start_positions\"] = create_int_feature([feature.start_position])\n features[\"end_positions\"] = create_int_feature([feature.end_position])\n impossible = 0\n if feature.is_impossible:\n impossible = 1\n features[\"is_impossible\"] = create_int_feature([impossible])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n self._writer.write(tf_example.SerializeToString())\n\n def close(self):\n self._writer.close()\n\n\ndef validate_flags_or_throw(bert_config):\n \"\"\"Validate the input FLAGS or throw an exception.\"\"\"\n tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case,\n FLAGS.init_checkpoint)\n\n if not FLAGS.do_train and not FLAGS.do_predict:\n raise ValueError(\"At least one of `do_train` or `do_predict` must be True.\")\n\n if FLAGS.do_train:\n if not FLAGS.train_file:\n raise ValueError(\n \"If `do_train` is True, then `train_file` must be specified.\")\n if FLAGS.do_predict:\n if not FLAGS.predict_file:\n raise ValueError(\n \"If `do_predict` is True, then `predict_file` must be specified.\")\n\n if FLAGS.max_seq_length > bert_config.max_position_embeddings:\n raise ValueError(\n \"Cannot use sequence length %d because the BERT model \"\n \"was only trained up to sequence length %d\" %\n (FLAGS.max_seq_length, bert_config.max_position_embeddings))\n\n if FLAGS.max_seq_length <= FLAGS.max_query_length + 3:\n raise ValueError(\n \"The max_seq_length (%d) must be greater than max_query_length \"\n \"(%d) + 3\" % (FLAGS.max_seq_length, FLAGS.max_query_length))\n\n\ndef main(_):\n tf.logging.set_verbosity(tf.logging.INFO)\n\n bert_config = modeling.BertConfig.from_json_file(FLAGS.bert_config_file)\n\n # validate_flags_or_throw(bert_config)\n\n tf.gfile.MakeDirs(FLAGS.output_dir)\n\n tokenizer = tokenization.FullTokenizer(\n vocab_file=FLAGS.vocab_file, do_lower_case=FLAGS.do_lower_case)\n\n tpu_cluster_resolver = None\n if FLAGS.use_tpu and 
FLAGS.tpu_name:\n tpu_cluster_resolver = tf.contrib.cluster_resolver.TPUClusterResolver(\n FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)\n\n is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2\n run_config = tf.contrib.tpu.RunConfig(\n cluster=tpu_cluster_resolver,\n master=FLAGS.master,\n model_dir=FLAGS.output_dir,\n save_checkpoints_steps=FLAGS.save_checkpoints_steps,\n tpu_config=tf.contrib.tpu.TPUConfig(\n iterations_per_loop=FLAGS.iterations_per_loop,\n num_shards=FLAGS.num_tpu_cores,\n per_host_input_for_training=is_per_host))\n\n train_examples = None\n num_train_steps = None\n num_warmup_steps = None\n if FLAGS.do_train:\n train_examples = read_forum_data(\n input_file=FLAGS.train_file, is_training=True)\n num_train_steps = int(\n len(train_examples) / FLAGS.train_batch_size * FLAGS.num_train_epochs)\n num_warmup_steps = int(num_train_steps * FLAGS.warmup_proportion)\n\n # Pre-shuffle the input to avoid having to make a very large shuffle\n # buffer in in the `input_fn`.\n rng = random.Random(12345)\n rng.shuffle(train_examples)\n\n model_fn = model_fn_builder(\n bert_config=bert_config,\n init_checkpoint=FLAGS.init_checkpoint,\n learning_rate=FLAGS.learning_rate,\n num_train_steps=num_train_steps,\n num_warmup_steps=num_warmup_steps,\n use_tpu=FLAGS.use_tpu,\n use_one_hot_embeddings=FLAGS.use_tpu)\n\n # If TPU is not available, this will fall back to normal Estimator on CPU\n # or GPU.\n estimator = tf.contrib.tpu.TPUEstimator(\n use_tpu=FLAGS.use_tpu,\n model_fn=model_fn,\n config=run_config,\n train_batch_size=FLAGS.train_batch_size,\n predict_batch_size=FLAGS.predict_batch_size)\n\n if FLAGS.do_train:\n # We write to a temporary file to avoid storing very large constant tensors\n # in memory.\n train_writer = FeatureWriter(\n filename=os.path.join(FLAGS.output_dir, \"train.tf_record\"),\n is_training=True)\n convert_examples_to_features(\n examples=train_examples,\n tokenizer=tokenizer,\n 
max_seq_length=FLAGS.max_seq_length,\n doc_stride=FLAGS.doc_stride,\n max_query_length=FLAGS.max_query_length,\n is_training=True,\n output_fn=train_writer.process_feature)\n train_writer.close()\n\n tf.logging.info(\"***** Running training *****\")\n tf.logging.info(\" Num orig examples = %d\", len(train_examples))\n tf.logging.info(\" Num split examples = %d\", train_writer.num_features)\n tf.logging.info(\" Batch size = %d\", FLAGS.train_batch_size)\n tf.logging.info(\" Num steps = %d\", num_train_steps)\n del train_examples\n\n train_input_fn = input_fn_builder(\n input_file=train_writer.filename,\n seq_length=FLAGS.max_seq_length,\n is_training=True,\n drop_remainder=True)\n estimator.train(input_fn=train_input_fn, max_steps=num_train_steps)\n\n if FLAGS.do_predict:\n eval_examples = read_forum_data(\n input_file=FLAGS.predict_file, is_training=False)\n\n eval_writer = FeatureWriter(\n filename=os.path.join(FLAGS.output_dir, \"eval.tf_record\"),\n is_training=False)\n eval_features = []\n\n def append_feature(feature):\n eval_features.append(feature)\n eval_writer.process_feature(feature)\n\n convert_examples_to_features(\n examples=eval_examples,\n tokenizer=tokenizer,\n max_seq_length=FLAGS.max_seq_length,\n doc_stride=FLAGS.doc_stride,\n max_query_length=FLAGS.max_query_length,\n is_training=False,\n output_fn=append_feature)\n eval_writer.close()\n\n tf.logging.info(\"***** Running predictions *****\")\n tf.logging.info(\" Num orig examples = %d\", len(eval_examples))\n tf.logging.info(\" Num split examples = %d\", len(eval_features))\n tf.logging.info(\" Batch size = %d\", FLAGS.predict_batch_size)\n\n all_results = []\n\n predict_input_fn = input_fn_builder(\n input_file=eval_writer.filename,\n seq_length=FLAGS.max_seq_length,\n is_training=False,\n drop_remainder=False)\n\n # If running eval on the TPU, you will need to specify the number of\n # steps.\n all_results = []\n for result in estimator.predict(\n predict_input_fn, 
yield_single_examples=True):\n if len(all_results) % 1000 == 0:\n tf.logging.info(\"Processing example: %d\" % (len(all_results)))\n unique_id = int(result[\"unique_ids\"])\n start_logits = [float(x) for x in result[\"start_logits\"].flat]\n end_logits = [float(x) for x in result[\"end_logits\"].flat]\n all_results.append(\n RawResult(\n unique_id=unique_id,\n start_logits=start_logits,\n end_logits=end_logits))\n\n output_prediction_file = os.path.join(FLAGS.output_dir, \"predictions.json\")\n output_nbest_file = os.path.join(FLAGS.output_dir, \"nbest_predictions.json\")\n output_null_log_odds_file = os.path.join(FLAGS.output_dir, \"null_odds.json\")\n\n write_predictions(eval_examples, eval_features, all_results,\n FLAGS.n_best_size, FLAGS.max_answer_length,\n FLAGS.do_lower_case, output_prediction_file,\n output_nbest_file, output_null_log_odds_file)\n\n\nif __name__ == \"__main__\":\n flags.mark_flag_as_required(\"vocab_file\")\n flags.mark_flag_as_required(\"bert_config_file\")\n flags.mark_flag_as_required(\"output_dir\")\n tf.app.run()\n"
] |
[
[
"tensorflow.contrib.cluster_resolver.TPUClusterResolver",
"tensorflow.logging.warning",
"tensorflow.FixedLenFeature",
"tensorflow.nn.log_softmax",
"tensorflow.gfile.GFile",
"tensorflow.reduce_sum",
"tensorflow.train.init_from_checkpoint",
"tensorflow.gfile.MakeDirs",
"tensorflow.to_int32",
"tensorflow.contrib.tpu.TPUEstimatorSpec",
"tensorflow.contrib.tpu.TPUEstimator",
"tensorflow.data.TFRecordDataset",
"tensorflow.truncated_normal_initializer",
"tensorflow.python_io.TFRecordWriter",
"tensorflow.logging.set_verbosity",
"tensorflow.trainable_variables",
"tensorflow.parse_single_example",
"tensorflow.app.run",
"tensorflow.matmul",
"tensorflow.unstack",
"tensorflow.zeros_initializer",
"tensorflow.logging.info",
"tensorflow.one_hot",
"tensorflow.contrib.tpu.TPUConfig",
"tensorflow.train.Features",
"tensorflow.nn.bias_add",
"tensorflow.train.Scaffold",
"tensorflow.transpose",
"tensorflow.flags.DEFINE_string",
"tensorflow.reshape"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
semodi/arxiv_app
|
[
"e33e8e1eae96e25cf41d4169dca82d88615e5d08",
"e33e8e1eae96e25cf41d4169dca82d88615e5d08"
] |
[
"arxv/app.py",
"web/dash_app.py"
] |
[
"import arxiv\nfrom flask import Flask, request, redirect, url_for, flash, jsonify\nimport pymysql\nimport pandas as pd\nimport datetime\nimport time\nimport mysql_config\nimport sys\nimport logging\nMAX_ARTICLES = 10000\n\ndef make_entry(d):\n \"\"\" Create database entry from query result\"\"\"\n id_ = d['id']\n updated = datetime.datetime.strptime(d['updated'], '%Y-%m-%dT%H:%M:%SZ')\n title = d['title']\n summary = d.get('summary','')\n tags = ', '.join([v['term'] for v in d['tags']])\n authors = ', '.join(d['authors'])\n return id_, updated, title, summary, tags, authors\n\ndef pull_data():\n conn = pymysql.connect(mysql_config.host,\n user=mysql_config.name,\n passwd=mysql_config.password,\n connect_timeout=5,\n port=mysql_config.port)\n c = conn.cursor()\n c.execute(''' create database if not exists arxiv''')\n conn.commit()\n conn.close()\n\n conn = pymysql.connect(mysql_config.host,\n user=mysql_config.name,\n passwd=mysql_config.password,\n db = 'arxiv',\n connect_timeout=5,\n port=mysql_config.port)\n c = conn.cursor()\n\n c.execute('''create table if not exists articles\n (id VARCHAR(100) unique, updated DATETIME, title TINYTEXT, summary TEXT, tags TINYTEXT, authors MEDIUMTEXT)''')\n\n c.execute('''create table if not exists users\n (id INTEGER NOT NULL AUTO_INCREMENT,\n created DATETIME,\n name VARCHAR(100),\n PRIMARY KEY (id))''')\n\n if not len(pd.read_sql(''' SELECT * FROM users''', conn)): #Add test user if table empty\n c.execute('''insert into users (id, created, name)\n values (NULL, %s, %s)''',\n (datetime.datetime.now(),'johndoe'))\n\n c.execute('''create table if not exists bookmarks\n (id INTEGER NOT NULL AUTO_INCREMENT,\n article_id VARCHAR(100),\n user_id INTEGER,\n created DATETIME,\n PRIMARY KEY(id))''')\n\n\n latest = pd.read_sql('''SELECT\n Max(updated) as dt\n FROM articles''', conn)['dt'][0]\n\n starting_over = False\n if not latest:\n logging.warning('No articles contained in table. 
Starting over...')\n latest = datetime.datetime(1900, 1, 1,1,1,1)\n starting_over = True\n\n cnt = 0\n for start in range(0, MAX_ARTICLES, 1000):\n if starting_over: logging.warning('{:d}/{:d} articles added'.format(start, MAX_ARTICLES))\n for q in arxiv.query('cat:cs.LG',max_results=1000, start=start, sort_by='submittedDate'):\n entry = make_entry(q)\n this_time = entry[1]\n if this_time <= latest:\n break\n else:\n c.execute('''insert into articles\n values (%s, %s, %s, %s, %s, %s)''',entry)\n cnt += 1\n else:\n continue\n break\n logging.warning('Total number of articles added: {:d}'.format(cnt))\n conn.commit()\n conn.close()\n\n return 'Total number of articles added: {:d}'.format(cnt)\n\n\napp = Flask(__name__)\n\n\[email protected]('/api/update', methods=['POST'])\ndef get_recommendation():\n try:\n pull_data()\n return \"{ Success }\"\n except:\n return \"{An error occured while trying to update the database}\"\n\nif __name__ == '__main__':\n app.run(debug=True, host='0.0.0.0',port='6540')\n",
"import pandas as pd\nimport pymysql\nimport dash\nimport dash_table\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output, State\nimport pandas as pd\nimport datetime\nimport requests\nimport json\nimport mysql_config\nimport warnings\nimport time\nimport pickle\nimport graph\nimport logging\nrecommender_url = 'http://rec:6545/api/'\n# Test user\nUSERNAME = 'root'\nU_ID = 0\ndef connect():\n return pymysql.connect(host=mysql_config.host,\n user=mysql_config.name,\n passwd=mysql_config.password,\n connect_timeout=5,\n database='arxiv',\n port=mysql_config.port)\n\n# r = requests.post(recommender_url + 'index/')\n\ndef get_authors_short(df):\n def names_to_dotted(names):\n name_to_dotted = lambda x: x[0] + '.'\n names = [n.split(' ') for n in names.split(', ')]\n names = [' '.join(map(name_to_dotted,n[:-1])) + ' ' + n[-1] for n in names]\n return ', '.join(names)\n\n if isinstance(df, pd.DataFrame):\n df['authors_short'] = df['authors'].apply(names_to_dotted)\n else:\n for i, record in enumerate(df):\n df[i]['authors_short'] = names_to_dotted(record['authors'])\n\n return df\n\ndef get_recommendations(no_papers=10, cutoff_days = 20, based_on = None, return_A = False):\n # Recommendations based on saved bookmarks\n headers = {'content-type': 'application/json', 'Accept-Charset': 'UTF-8'}\n data = {'user_id': U_ID, 'no_papers':no_papers, 'cutoff_days': cutoff_days, 'based_on': based_on}\n data = json.dumps(data)\n try:\n r = requests.post(recommender_url + 'recommend', data=data, headers=headers)\n recommendations = json.loads(r.text)\n distances = recommendations['distances']\n query = recommendations['query']\n recommendations = recommendations['recommendations']\n except Exception as e:\n print(e)\n print('Recommendations could not be retrieved')\n recommendations = []\n query = []\n distances = [[0]]\n # recommendations = []\n # with open('recfile.pckl', 'rb') as file:\n # recommendations = 
pickle.load(file)\n cond_rec = [{\n 'if': {\n 'filter_query': '{{id}} = {}'.format(r['id']) # matching rows of a hidden column with the id, `id`\n },\n 'color': 'tomato',\n 'fontWeight': 'bold'\n } for r in recommendations]\n if return_A:\n return cond_rec, recommendations, query, distances\n else:\n return cond_rec, recommendations\n\ncond_rec, recommendations, query, distances = get_recommendations(return_A=True)\ndisplay_columns = ['title', 'authors_short']\nday = lambda i: '{:d} days ago'.format(abs(i)) if i != 0 else 'today'\n\nnetwork_fig, colors = graph.get_graph(query + recommendations, distances, n_query=len(query))\n# =============== LAYOUT ===================\n\n# external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']\n# app = dash.Dash(__name__, external_stylesheets=external_stylesheets)\napp = dash.Dash(\n __name__,\n meta_tags=[{\"name\": \"viewport\", \"content\": \"width=device-width, initial-scale=1\"}],\n)\napp.title = 'paper-scraper'\npanel_color = '#161a28'\nsuffix_row = \"_row\"\nsuffix_button_id = \"_button\"\nsuffix_sparkline_graph = \"_sparkline_graph\"\nsuffix_count = \"_count\"\nsuffix_ooc_n = \"_OOC_number\"\nsuffix_ooc_g = \"_OOC_graph\"\nsuffix_indicator = \"_indicator\"\napp.layout = html.Div(children=\n [\n\n html.Div(\n id=\"banner\",\n className=\"banner\",\n children=[\n html.Div(\n id=\"banner-text\",\n children=[\n html.H5(\"paper-scraper\"),\n ],\n ),\n html.Div(\n id=\"banner-logo\",\n children=[\n # html.Button(\n # id=\"learn-more-button\", children=\"LEARN MORE\", n_clicks=0\n # ),\n html.Div(className='five columns', children=[\n html.P(),\n html.P('created with')\n ]),\n html.Img(id=\"logo\", src=app.get_asset_url(\"dash-logo-new.png\")),\n ],\n ),\n ],\n ),\n html.Div(children=[\n dcc.Tabs(id='tabs',value='tab1', className='custom-tabs', children=[\n dcc.Tab(id='Recent-tab',label='Recent papers', value='tab1',\n className='custom-tab',selected_className='custom-tab--selected', children=[\n # 
html.Div(className='row', children=\n # [\n html.Div([\n html.Div(className='row', children=\n [\n html.P(id='buffer2', style={'marginTop':'2 rem'}),\n html.Label('Number of papers'),\n dcc.Slider(\n id='slider-no-papers',\n min=5,\n max=100,\n step=1,\n value=15,\n marks={i:'{:d}'.format(i) for i in range(10,101,10)}\n ),\n ], style={'marginBottom': '2em','marginLeft': '8em','marginRight': '8em'}\n ),\n html.Div(\n [\n html.Label('Publication date'),\n dcc.RangeSlider(\n id='time-slider',\n min=-30,\n max=0,\n step=1,\n value=[-10,0],\n marks={i:day(i) for i in range(-30,1,5)}\n ),\n ], style={'marginBottom': '3em','marginLeft': '8em','marginRight': '8em'}\n ),\n html.Div([\n dash_table.DataTable(\n id='table-papers',\n # style_cell={\n # 'whiteSpace': 'normal',\n # 'height': 'auto',\n # 'width': '60%'\n # },\n style_header={\"fontWeight\": \"bold\", \"color\": \"inherit\"},\n style_as_list_view=True,\n fill_width=True,\n style_cell={\n \"backgroundColor\": \"#1e2130\",\n \"fontFamily\": \"Open Sans\",\n \"padding\": \"0 2rem\",\n \"color\": \"darkgray\",\n \"border\": \"none\",\n \"width\" : '60%',\n \"height\" : 'auto',\n \"whiteSpace\" : 'normal'\n },\n css=[\n {\"selector\": \"tr:hover td\", \"rule\": \"color: #91dfd2 !important;\"},\n {\"selector\": \"td\", \"rule\": \"border: none !important;\"},\n {\n \"selector\": \".dash-cell.focused\",\n \"rule\": \"background-color: #1e2130 !important;\",\n },\n {\"selector\": \"table\", \"rule\": \"--accent: #1e2130;\"},\n {\"selector\": \"tr\", \"rule\": \"background-color: transparent\"},\n ],\n style_data_conditional= cond_rec,\n row_selectable=\"multi\",\n columns=[{\"name\": i.split('_')[0], \"id\": i} for i in display_columns],\n # data=df.to_dict('records'),\n style_table={'width': '100%', 'height': 500,'overflowY':'scroll'})\n ],className='eleven columns'),\n html.Button('Bookmark selected',\n id ='bookmark-button'),\n html.P(id='updated-bookmarks',children=1,hidden=True),\n # ]),\n 
],style={'backgroundColor':panel_color}),\n ],style = {'backgroundColor':panel_color}),\n dcc.Tab(id='Rec-tab',label='Recommended', value='tab2',\n className='custom-tab',selected_className='custom-tab--selected', children=[\n # html.Div(className='row', children=\n # [\n html.Div([\n html.Div(className='row', children=\n [\n html.P(id='buffer1', style={'marginTop':'2 rem'}),\n html.Label('Number of papers'),\n dcc.Slider(\n id='slider-no-papers-rec',\n min=5,\n max=100,\n step=1,\n value=15,\n marks={i:'{:d}'.format(i) for i in range(10,101,10)}\n ),\n ], style={'marginBottom': '2em','marginLeft': '8em','marginRight': '8em'}\n ),\n html.Div(\n [\n html.Label('Publication date'),\n dcc.Slider(\n id='time-slider-rec',\n min=0,\n max=30,\n step=1,\n value=20,\n marks={i:day(-i) for i in range(0,31,5)}\n ),\n ], style={'marginBottom': '3em','marginLeft': '8em','marginRight': '8em'}\n ),\n html.Div([\n dash_table.DataTable(\n id='table-rec',\n # style_cell={\n # 'whiteSpace': 'normal',\n # 'height': 'auto',\n # 'width': '60%'\n # },\n style_header={\"fontWeight\": \"bold\", \"color\": \"inherit\"},\n style_as_list_view=True,\n fill_width=True,\n style_cell={\n \"backgroundColor\": \"#1e2130\",\n \"fontFamily\": \"Open Sans\",\n \"padding\": \"0 2rem\",\n \"color\": \"darkgray\",\n \"border\": \"none\",\n \"width\" : '60%',\n \"height\" : 'auto',\n \"whiteSpace\" : 'normal'\n },\n css=[\n {\"selector\": \"tr:hover td\", \"rule\": \"color: #91dfd2 !important;\"},\n {\"selector\": \"td\", \"rule\": \"border: none !important;\"},\n {\n \"selector\": \".dash-cell.focused\",\n \"rule\": \"background-color: #1e2130 !important;\",\n },\n {\"selector\": \"table\", \"rule\": \"--accent: #1e2130;\"},\n {\"selector\": \"tr\", \"rule\": \"background-color: transparent\"},\n ],\n row_selectable=\"multi\",\n columns=[{\"name\": i.split('_')[0], \"id\": i} for i in display_columns],\n # data=df.to_dict('records'),\n style_table={'width': '100%', 'height': 500,'overflowY':'scroll'})\n 
],className='eleven columns'),\n html.Button('Bookmark selected',\n id ='bookmark-button-rec'),\n html.P(id='updated-bookmarks-rec',children=1,hidden=True),\n # ]),\n ],style={'backgroundColor':panel_color}),\n ]),\n dcc.Tab(id='Exp-tab',label='Explore',value='tab3',\n selected_className='custom-tab--selected',\n className='custom-tab', children = [\n html.P(id='buffer3', style={'marginTop':'2 rem'}),\n dcc.Graph(id='network-graph', figure=network_fig, style={'width':'100%','height':500}),\n html.Button('Reset',id ='reset-button'),\n html.Button('Bookmark',id ='bookmark-button-explore'),\n html.Div(id='hidden-graphs', children=[\n dcc.Graph(id='network-graph-master', figure=network_fig),\n dcc.Graph(id='network-graph-1', figure=network_fig),\n dcc.Graph(id='network-graph-2', figure=network_fig),\n dcc.Graph(id='network-graph-3', figure=network_fig),\n dcc.Graph(id='network-graph-4', figure=network_fig),\n html.P(id='zoomed-in'),\n html.Div(id='hover-data'),\n ],style={'display':'none'})\n ])\n ])\n ], className='tabs six columns'),\n# ]),\n html.Div(\n [\n # html.H3('Info'),\n\n # dcc.Textarea(\n # id='textarea-abstract',\n # value='Textarea content initialized\\nwith multiple lines of text',\n # style={'width': '100%', 'height': 300}),\n # html.Br(),\n html.Div(className='twelve columns', children = [\n html.Div(className='section-banner',children='Info'),\n html.Div(className='eleven columns', children = [\n html.Br(),\n html.Center([\n dcc.Markdown(\n id='textarea-abstract',\n children='Click on entry to display information',\n style={'width': '99%', 'height': 337,'overflowY':'scroll','text-align':'left'}),\n ]),\n html.A(id='gotolink', children='Go to paper', href='http://www.google.com'),])\n ],style={'backgroundColor':panel_color, 'marginBottom':'1rem'}),\n html.Div(className='twelve columns', children = [\n html.Div(className='section-banner',children='Bookmarked'),\n # html.H3('Bookmarked'),\n html.Div(className='eleven columns', children = [\n 
dash_table.DataTable(\n id='table-bookmarks',\n style_header={\"fontWeight\": \"bold\", \"color\": \"inherit\"},\n style_as_list_view=True,\n fill_width=True,\n style_cell={\n \"backgroundColor\": \"#1e2130\",\n \"fontFamily\": \"Open Sans\",\n \"padding\": \"0 2rem\",\n \"color\": \"darkgray\",\n \"border\": \"none\",\n \"width\" : '60%',\n \"height\" : 'auto',\n \"whiteSpace\" : 'normal'\n },\n css=[\n {\"selector\": \"tr:hover td\", \"rule\": \"color: #91dfd2 !important;\"},\n {\"selector\": \"td\", \"rule\": \"border: none !important;\"},\n {\n \"selector\": \".dash-cell.focused\",\n \"rule\": \"background-color: #1e2130 !important;\",\n },\n {\"selector\": \"table\", \"rule\": \"--accent: #1e2130;\"},\n {\"selector\": \"tr\", \"rule\": \"background-color: transparent\"},\n ],\n columns=[{\"name\": i.split('_')[0], \"id\": i} for i in display_columns],\n # data=df_bookmarks.to_dict('records'),\n style_table={'width': '99%', 'height': 308,'overflowY':'scroll'}),\n ]),\n html.P(id='selected-bookmarks'), # Hacky workaround bc. 
dash would mix up callbacks between tables\n html.P(id='selected-papers'),\n html.P(id='selected-rec'),\n html.P(id='loading-rec'),\n html.P(id='ref-trigger'),\n html.P(id='placeholder'),\n ],style={'backgroundColor':panel_color}),\n ], className='five columns'),\n# ]),\n\n ], className = 'row'\n)\n\n# ============= CALLBACKS =================\n\[email protected](\n Output('updated-bookmarks','children'),\n [Input('bookmark-button', 'n_clicks'),\n Input('bookmark-button-rec', 'n_clicks'),\n Input('bookmark-button-explore','n_clicks')],\n [State('table-papers','selected_rows'),\n State('table-papers','data'),\n State('table-rec', 'selected_rows'),\n State('table-rec', 'data'),\n State('network-graph','hoverData')])\ndef bookmark_papers(_, __, ___, rows_pap, data_pap, rows_rec, data_rec,hover_data):\n\n ctx = dash.callback_context\n ctx = ctx.triggered[0]['prop_id']\n if ctx == 'bookmark-button.n_clicks':\n rows = rows_pap\n data = data_pap\n elif ctx == 'bookmark-button-rec.n_clicks':\n rows = rows_rec\n data = data_rec\n elif ctx == 'bookmark-button-explore.n_clicks':\n if hover_data is not None:\n rows = [0]\n data = [json.loads(hover_data['points'][0]['customdata'])]\n else:\n rows = []\n else:\n rows = []\n\n updated = False\n conn = connect()\n c = conn.cursor()\n # Load user's bookmarks into memory\n df = pd.read_sql(\"\"\" SELECT * FROM bookmarks\n WHERE user_id = {:d} \"\"\".format(U_ID), conn)\n for row in rows:\n # Check if bookmark already exists\n if not data[row]['id'] in df['article_id'].values:\n updated = True\n c.execute(''' INSERT INTO bookmarks\n values (NULL, %s, %s, %s)''',(data[row]['id'], U_ID, datetime.datetime.now()))\n conn.commit()\n conn.close()\n\n return int(updated)\n\[email protected](\n Output('table-bookmarks','data'),\n Input('updated-bookmarks','children'),\n State('table-bookmarks','data'))\ndef update_bookmark_table(value, data):\n if value or data is None:\n\n conn = connect()\n df_bookmarks = pd.read_sql(\"\"\" SELECT\n 
articles.id as id,\n bookmarks.user_id as user_id,\n bookmarks.created,\n updated,\n authors,\n title,\n summary\n FROM articles\n INNER JOIN bookmarks\n ON articles.id = bookmarks.article_id\n WHERE bookmarks.user_id = {}\n ORDER BY bookmarks.created DESC\"\"\".format(U_ID), conn)\n conn.close()\n df_bookmarks = get_authors_short(df_bookmarks)\n return df_bookmarks.to_dict('records')\n else:\n print('Nothing to update')\n return data\n\n\[email protected](\n [Output('textarea-abstract','children'),\n Output('gotolink','children'),\n Output('gotolink','href'),\n Output('selected-bookmarks','value'),\n Output('selected-papers','value'),\n Output('selected-rec', 'value')],\n [Input('table-bookmarks','active_cell'),\n Input('table-papers','active_cell'),\n Input('table-rec','active_cell'),\n Input('network-graph', 'hoverData')],\n [State('table-bookmarks','data'),\n State('table-papers','data'),\n State('table-rec','data'),\n State('selected-bookmarks','value'),\n State('selected-papers','value'),\n State('selected-rec', 'value')])\ndef get_active(active_cell_bm, active_cell_p, active_cell_rec, hoverData, data_bm, data_p, data_rec, sbm, sp, srec):\n \"\"\" Check which paper selected and display its summary\"\"\"\n\n def get_summary(row):\n return '**' + row['title'] + '** \\n\\n *' + row['authors'] + '*\\n\\n' + \\\n 'Updated: ' + row['updated'].split('T')[0] + '\\n\\n' + \\\n row['summary'].replace('\\n',' ')\n\n if hoverData:\n based_on = json.loads(hoverData['points'][0]['customdata'])\n summary = get_summary(based_on)\n\n return summary,'Go to paper', based_on['id'], sbm, sp, srec\n\n else:\n if active_cell_bm and active_cell_bm != sbm:\n row = active_cell_bm['row']\n summary = get_summary(data_bm[row])\n sbm = active_cell_bm\n return summary,'Go to paper',data_bm[row]['id'], sbm, sp, srec\n elif active_cell_p and active_cell_p != sp:\n row = active_cell_p['row']\n summary = get_summary(data_p[row])\n sp = active_cell_p\n return summary,'Go to 
paper',data_p[row]['id'], sbm, sp, srec\n elif active_cell_rec :\n row = active_cell_rec['row']\n summary = get_summary(data_rec[row])\n sp = active_cell_rec\n return summary,'Go to paper',data_rec[row]['id'], sbm, sp, srec\n else:\n return 'Click on entry to display information','Go to paper','', sbm, sp, srec\n\n\n\[email protected](\n [Output('table-papers','style_data_conditional'),\n Output('table-rec', 'data'),\n Output('network-graph-master', 'figure'),\n Output('table-bookmarks','style_data_conditional')],\n [Input('time-slider-rec', 'value'),\n Input('slider-no-papers-rec', 'value'),\n Input('ref-trigger','value')],\n [State('table-papers','style_data_conditional'),\n State('table-rec', 'data'),\n State('network-graph-master', 'figure'),\n State('table-bookmarks','style_data_conditional')])\ndef update_recommendations(time_lim, no_papers, ref_trigger, *states):\n logging.warning(ref_trigger)\n if ref_trigger:\n cond_rec, recommendations, query, distances = get_recommendations(no_papers, time_lim, return_A=True)\n\n total = query + recommendations\n network_fig_, colors = graph.get_graph(total, distances, n_query=len(query))\n cond_rec_bm = [{\n 'if': {\n 'filter_query': '{{id}} = {}'.format(r['id']) # matching rows of a hidden column with the id, `id`\n },\n 'color': c,\n } for r, c in zip(total,colors)]\n if len(total):\n return cond_rec, get_authors_short(recommendations), network_fig_, cond_rec_bm\n else:\n return [],[{'title':'No bookmarks yet...','authors':''}], network_fig_, cond_rec_bm\n else:\n return states\[email protected](\n Output('table-papers', 'data'),\n [Input('time-slider', 'value'),\n Input('slider-no-papers', 'value')])\ndef filter_papers(time_lim, no_papers):\n \"\"\" Apply number and date filter to displayed papers\"\"\"\n conn = connect()\n query = \"\"\" SELECT\n *\n FROM articles\n WHERE DATE(updated) > DATE_ADD(DATE(NOW()),INTERVAL {:d} day)\n AND DATE(updated) < DATE_ADD(DATE(NOW()), INTERVAL {:d} day) LIMIT {:d} 
\"\"\".format(time_lim[0],\n time_lim[1],\n no_papers)\n df = pd.read_sql(query, conn)\n conn.close()\n df = get_authors_short(df)\n if len(df):\n return df.to_dict('records')\n else:\n return [{'title':'Pulling recent articles from arXiv', 'authors':''}]\n\[email protected](\n [Output('network-graph', 'figure'),\n Output('network-graph-1', 'figure'),\n Output('zoomed-in', 'value')],\n [Input('network-graph', 'clickData'),\n Input('network-graph-master', 'figure'),\n Input('reset-button','n_clicks')],\n [State('network-graph', 'figure'),\n State('network-graph-1', 'figure'),\n State('zoomed-in','value')], prevent_initial_call=True)\ndef display_click_data(clickData,fig_master,n_clicks, fig, fig_1,zoomed):\n ctx = dash.callback_context\n ctx = ctx.triggered[0]['prop_id']\n if ctx == 'network-graph.clickData' and clickData:\n based_on = [json.loads(clickData['points'][0]['customdata'])]\n curve_no = clickData['points'][0]['pointNumber']\n if zoomed and curve_no == 0:\n return fig_1, fig_master, 0\n cond_rec, recommendations, query, distances = get_recommendations(10, 10000,\n based_on = based_on, return_A=True)\n\n total = query + recommendations\n network_fig, colors = graph.get_graph(total, distances, n_query=1)\n return network_fig, fig, 1\n elif ctx in ['network-graph-master.figure','reset-button.n_clicks']:\n return fig_master, fig_master, 0\n else:\n return fig, fig_1, 0\n\[email protected](\n [Output('network-graph','hoverData'),\n Output('ref-trigger', 'value')],\n Input('tabs','value'),\n State('time-slider-rec','value')\n)\ndef switch_tabs(inp, value):\n if inp == 'tab2':\n return None, 1\n else:\n return None, 0\n\nif __name__ == '__main__':\n app.run_server(debug=False, host='0.0.0.0', port='8080')\n"
] |
[
[
"pandas.read_sql"
],
[
"pandas.read_sql"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
vespa-mrs/vespa
|
[
"6d3e84a206ec427ac1304e70c7fadf817432956b",
"6d3e84a206ec427ac1304e70c7fadf817432956b",
"6d3e84a206ec427ac1304e70c7fadf817432956b",
"6d3e84a206ec427ac1304e70c7fadf817432956b"
] |
[
"vespa/analysis/functors/funct_fidsum_wbnaa.py",
"vespa/analysis/algos/b0_correction.py",
"vespa/analysis/block_prep_megalaser.py",
"vespa/analysis/chain_spectral.py"
] |
[
"# Python modules\n\n\n\n# 3rd party modules\nimport numpy as np\n\n\n# Our modules\nimport vespa.common.constants as common_constants\nimport vespa.common.minf_parabolic_info as minf\nimport vespa.common.util.generic_spectral as util_spectral\n\n\n\n\ndef funct_optimize_phase0(rad, info):\n \"\"\"\n Optimization function used optimize the zero order phase for FIDs\n in a FidSum dataset. Each FID is compared to the sum of all FIDs.\n A least squared difference is calculated for a phase 0 value that\n best matches the FID to the reference absorption spectrum.\n \n INPUT:\n rad: current phase in radians\n info: control structure, see optimize_phase0 for definition\n \n \"\"\"\n phase = np.exp(1j*rad)\n dat = info['dat'].copy() * phase\n \n istr = info['pts'][0]\n iend = info['pts'][1]\n datt = dat[istr:iend].copy()\n reff = info['ref'][istr:iend].copy() \n\n diff = np.sum((datt.real - reff.real)**2) \n \n return diff \n \n \ndef optimize_phase0(data, modfn, pts):\n \"\"\"\n Returns the zero order phase in deg at which the least squares\n difference between the data and modfn absorption spectra are \n minimized\n \n INPUT:\n data: array of complex data to be phased\n modfn: string array of model function to be used (obsolete)\n pts: list, start and end point region used for leastsqr calculation\n \n \"\"\"\n info = {'dat' : data, \n 'ref' : modfn,\n 'pts' : pts }\n \n # Parabolic interpolation, Brent's method 1-d minimization routine\n \n phase_a = -1*np.pi # lower bound\n phase_b = np.pi*0.1 # \"some\" point in the middle\n phase_c = 2*np.pi # upper bound\n \n phaseat, maxit = minf.minf_parabolic_info( phase_a, phase_b, phase_c,\n funct_optimize_phase0, info)\n phdeg = phaseat*180.0/np.pi\n if phdeg > 180.0:\n phdeg = phdeg - 360.0\n \n return phdeg \n\n\ndef _height2area_function(val, info):\n \"\"\"\n This is the minimization function used by minf_parabolic_info in the\n _calc_height2area_ratio() call. 
The val parameter is the decay value\n for which we need to calculate a FWHM line width. Because we are minimizing\n in this optimization, we subtract the calculated value from the original\n line width values (in Hz) and take the absolute value.\n\n \"\"\"\n ta = val if info[\"ta\"] == -1 else info[\"ta\"]\n tb = val if info[\"tb\"] == -1 else info[\"tb\"]\n \n width_hz, peak = util_spectral.voigt_width(ta, tb, info[\"chain\"])\n \n info[\"peak\"] = peak\n \n return np.abs(info[\"orig_lw\"] - width_hz)\n\n\ndef _calc_height2area_ratio(lw, chain, ta=-1.0, tb=-1.0 ):\n \"\"\"\n We know the value of the full width half max line width in Hz that we have\n in our data, and want to find the Ta and Tb values that yield this.\n \n This function uses the minf_parabolic_info routine to optimze Ta and Tb\n to values between 0.005 and 0.5, however either of the two parameters can\n also be set to constant values by setting the TA and TB keywords to this\n function to the constant value desired. This way we can calculate Pure \n Gauss or Pure Lorentz lineshape starting values as well as Voigt/LorGauss\n line shape values.\n \n The optimization calls the fitt_height2area_function() to determine the \n minimization function. As part of that call, we calculate the height of the\n peak for Ta and Tb, which is stored in the info.peak parameter. 
This is \n used to provide a normalization value for peak height to peak area \n conversion on return of this function.\n \n lw - float, linewidth in Hz\n chain - pointer to control structure\n ta - keyword, float, constant value for Ta in the optimization\n tb - keyword, float, constant value for Tb in the optimization\n\n \"\"\"\n info = { 'ta':ta, 'tb':tb, 'orig_lw':lw, 'chain':chain, 'peak':-1.0 }\n \n # Call parabolic interpolation, Brent's method\n # 1-d minimization routine\n \n val_a = 0.005 # lower bound\n val_b = 0.06 # \"some\" point in the middle\n val_c = 0.5 # upper bound\n \n finalval, maxit = minf.minf_parabolic_info( val_a, val_b, val_c,\n _height2area_function, \n info ) \n return [finalval, info[\"peak\"]]\n\n\ndef do_processing_all(chain):\n \"\"\"\n Because we are bumping the zero fill factor here by a factor of 4 to \n better find the peak max, we can not use the standard util_ppm \n functions to calculate some conversions. So long hand here for us.\n \n \"\"\"\n block = chain._block\n set = chain._block.set\n dataset = chain._dataset\n \n zfmult = 4 # larger zfmult here improves peak shift accuracy\n raw_dim0 = dataset.raw_dims[0]\n raw_hpp = dataset.sw / raw_dim0\n fid_dim0 = raw_dim0 * zfmult\n fid_hpp = dataset.sw / fid_dim0\n \n # reset results arrays and temporary arrays\n work = np.zeros((raw_dim0),complex)\n chain.time_summed_offset = np.zeros((raw_dim0),complex)\n chain.freq_current = np.zeros((raw_dim0),complex)\n chain.freq_summed = np.zeros((raw_dim0),complex)\n chain.freq_summed_offset = np.zeros((raw_dim0),complex)\n \n xx = np.arange(raw_dim0) / dataset.sw\n search = np.zeros((raw_dim0 * zfmult),complex)\n\n # convert algorithm values from PPM to points\n b0_start = set.reference_peak_center + set.peak_search_width\n b0_end = set.reference_peak_center - set.peak_search_width\n b0_ctr_pt = (fid_dim0 / 2) - (dataset.frequency * (set.reference_peak_center - dataset.resppm) / fid_hpp)\n b0_start = int((fid_dim0 / 2) - 
(dataset.frequency * (b0_start - dataset.resppm) / fid_hpp))\n b0_end = int((fid_dim0 / 2) - (dataset.frequency * (b0_end - dataset.resppm) / fid_hpp))\n \n ph0_start = set.phase0_range_start\n ph0_end = set.phase0_range_end\n ph0_ctr = ph0_start - 0.5*(ph0_start - ph0_end)\n ph0_start = int((raw_dim0 / 2) - (dataset.frequency * (ph0_start - dataset.resppm) / raw_hpp))\n ph0_end = int((raw_dim0 / 2) - (dataset.frequency * (ph0_end - dataset.resppm) / raw_hpp))\n\n # one time calculations \n apod = util_spectral.apodize(xx, set.gaussian_apodization, 'Gaussian')\n chop = ((((np.arange(raw_dim0) + 1) % 2) * 2) - 1)\n apod *= chop\n\n nfids = chain.raw.shape[2]\n\n if set.apply_data_exclusion:\n nfids_excluded = nfids - len(block.exclude_indices)\n else:\n nfids_excluded = nfids\n\n\n\n\n #--------------------------------------------------------------------------\n # Depending on AutoCalc flags, calculate B0 and Phase0 corrections\n\n if set.apply_peak_shift: # B0 corrections\n\n for i in range(nfids):\n time = chain.raw[0,0,i,:].copy()\n if set.fid_left_shift_b0 != 0:\n # shift fid to the left and set last points to zero\n time = np.roll(time, -set.fid_left_shift_b0) \n time[-set.fid_left_shift_b0:] = time[0]*0.0 \n \n # Calculate peaks shift if flag set, use oversized zfmult\n # Peak search is performed over user-set range on magnitude data\n search *= 0.0\n search[0:raw_dim0] = time * apod\n search = np.fft.fft(search) \n temp = np.abs(search)\n imax = temp[b0_start:b0_end].argmax()\n delta = (b0_ctr_pt-(b0_start+imax))*fid_hpp\n block.frequency_shift[i] = delta\n\n if set.apply_phase0: # Phase0 corrections\n\n # We need to do this in a second loop, since we need the peaks \n # shifted before we create a reference spectrum from the summed FIDs\n\n # Create reference spectrum \n \n if set.ref_spectrum_source == 'average_all_fids':\n work *= 0\n for i in range(nfids):\n time = chain.raw[0,0,i,:].copy()\n time *= np.exp(1j * 2.0 * np.pi * block.frequency_shift[i] * 
xx)\n work += time\n \n ref_spec = work.copy() * apod\n ref_spec[0] *= 0.5\n ref_spec = (np.fft.fft(ref_spec) / len(ref_spec))\n ref_spec /= nfids # scale for comparison to single FID\n else:\n # create reference peak at center of range\n res = _calc_height2area_ratio( set.ref_peak_line_width, dataset )\n ref_spec = util_spectral.create_spectrum([1.0,], [ph0_ctr,], [0.0,], dataset, ta=res[0], tb=res[0])\n \n # Calc Phase0 correction\n \n for i in range(nfids):\n time = chain.raw[0,0,i,:].copy()\n\n if set.fid_left_shift_phase0 != 0: # shift fid left, set last points to zero\n time = np.roll(time, -set.fid_left_shift_phase0) \n time[-set.fid_left_shift_phase0:] = time[0]*0.0 \n \n time *= np.exp(1j * 2.0 * np.pi * block.frequency_shift[i] * xx) \n \n time[0] *= 0.5\n tmp_freq = (np.fft.fft(time * apod) / len(time))\n phdeg = optimize_phase0(tmp_freq, ref_spec, [ph0_start, ph0_end])\n block.phase_0[i] = phdeg\n\n # Apply B0 and Phase0 corrections to raw data to create current and \n # summed FID arrays with AutoCorr Left Shift values applied\n \n for i in range(nfids):\n\n time = chain.raw[0,0,i,:].copy()\n\n if set.fid_left_shift_phase0 != 0:\n time = np.roll(time, -set.fid_left_shift_phase0) \n time[-set.fid_left_shift_phase0:] = time[0]*0.0 \n elif set.fid_left_shift_b0 != 0:\n time = np.roll(time, -set.fid_left_shift_b0) \n time[-set.fid_left_shift_b0:] = time[0]*0.0 \n \n time *= np.exp(1j * 2.0 * np.pi * block.frequency_shift[i] * xx) \n time *= np.exp(1j * block.phase_0[i] * common_constants.DEGREES_TO_RADIANS)\n\n if set.apply_data_exclusion:\n if i not in block.exclude_indices:\n chain.freq_summed += time\n else:\n chain.freq_summed += time\n\n if i == chain.voxel:\n chain.freq_current += time \n \n \n # Calculate final summed Time and Freq arrays with constant phase offset\n\n for i in range(nfids):\n # Apply B0 and Phase0 corrections to raw data to create summed data\n # array for plotting with global Left Shift and Constant Phase0 \n # values applied. 
This is also the result for this tab\n\n time = chain.raw[0,0,i,:].copy()\n\n if set.fid_left_shift != 0:\n # shift fid to the left and set last points to zero\n time = np.roll(time, -set.fid_left_shift) \n time[-set.fid_left_shift:] = time[0]*0.0 \n \n time *= np.exp(1j * 2.0 * np.pi * block.frequency_shift[i] * xx) \n time *= np.exp(1j * block.phase_0[i] * common_constants.DEGREES_TO_RADIANS)\n time *= np.exp(1j * set.constant_phase0_offset * common_constants.DEGREES_TO_RADIANS)\n\n if set.apply_data_exclusion:\n if i not in block.exclude_indices:\n chain.freq_summed_offset += time # for display\n chain.time_summed_offset += time # result for tab\n else: \n chain.freq_summed_offset += time # for display\n chain.time_summed_offset += time # result for tab\n \n # Last steps to prepare the display arrays for plotting in Freq domain\n\n chain.freq_current[0] *= 0.5 \n chain.freq_current = (np.fft.fft(chain.freq_current * apod) / raw_dim0) * nfids # nfids for comparison plot\n \n chain.freq_summed[0] *= 0.5\n chain.freq_summed = (np.fft.fft(chain.freq_summed * apod) / raw_dim0)\n\n chain.freq_summed_offset[0] *= 0.5\n chain.freq_summed_offset = (np.fft.fft(chain.freq_summed_offset * apod) / raw_dim0)\n \n\n",
"# Python modules\n\n# 3rd party modules\nimport numpy as np\n\n# Our modules\nfrom vespa.analysis.algos.cross_correlate import cross_correlate\n\n\n\ndef b0_correction( dat, ref, magn=False, cdeg=1, nlag=None):\n \"\"\"\n B0 correction algorithm number 9. Uses correlation of data region to an ideal\n spectrum to determine integer shift of data to match.\n\n INPUT:\n dat: complex array, raw data\n ref: complex array, ideal spectrum\n KEYWORDS:\n CORR: correlation value array, [magn,real,imag]\n CSUM: sum of real and imag ccvals\n NLAG: number of points to lag in correlation\n CDEG: real and imag cc val mixing degree\n MAGN: flag, performs calc only on Magnitude data when set\n\n \"\"\"\n error = 0\n corr = [-1.0,-1.0,-1.0]\n csum = -2.0\n\n ndat = len(dat)\n\n if ndat != len(ref):\n return 0.0\n\n if not nlag:\n nlag = ndat\n\n shft = 0.0\n ndat2 = int(ndat/2)\n nlag2 = int(nlag/2)\n lag = np.arange(nlag) - nlag2\n indx = [0.0,0.0,0.0]\n\n tmp = cross_correlate(np.abs(dat),np.abs(ref), lag)\n\n corr[0] = np.max(tmp)\n indx[0] = np.argmax(tmp)\n shft = indx[0] - nlag2 + 1 # based on empirical test with Prior data sets \n csum = 0.0\n\n if not magn:\n tmp = cross_correlate(dat.real, ref.real, lag)\n corr[1] = np.max(tmp)\n indx[1] = np.argmax(tmp)\n tmp = cross_correlate(dat.imag, ref.imag, lag)\n corr[2] = np.max(tmp)\n indx[2] = np.argmax(tmp)\n\n isort = np.argsort(corr)\n shft = indx[isort[2]] - nlag2 + 1\n csum = corr[1]**cdeg + corr[2]**cdeg\n\n return int(shft), csum",
"# Python modules\n\n\n# 3rd party modules\nimport numpy as np\nfrom xml.etree.cElementTree import Element\n\n# Our modules\nimport vespa.analysis.block_prep_identity as block_prep_identity\nimport vespa.analysis.chain_prep_megalaser as chain_prep_megalaser\nimport vespa.analysis.block as block\n\nimport vespa.common.util.xml_ as util_xml\nimport vespa.common.mrs_data_raw as mrs_data_raw\n\nfrom vespa.common.constants import Deflate\n\n\n\nclass _Settings(object):\n \"\"\"\n Settings object contains the parameter inputs used for processing in the \n Chain object in this Block. Having a separate object helps to delineate \n inputs/outputs and to simplify load/save of preset values.\n\n This object can also save/recall these values to/from an XML node.\n\n \"\"\"\n XML_VERSION = \"1.0.0\"\n\n \n def __init__(self, attributes=None):\n\n self.fid_left_shift = 0\n self.gaussian_apodization = 2.0\n self.global_phase1 = 0.0\n \n self.apply_peak_shift = True\n self.reference_peak_center = 2.01\n self.peak_search_width = 0.2\n self.fid_left_shift_b0 = 56\n \n self.apply_phase0 = True\n self.phase0_range_start = 2.2\n self.phase0_range_end = 1.8\n self.fid_left_shift_phase0 = 56\n self.ref_spectrum_source = 'singlet_centered_in_range'\n self.ref_peak_line_width = 18\n self.constant_phase0_offset = 70 # degrees\n \n if attributes is not None:\n self.inflate(attributes)\n\n\n def __str__(self):\n\n lines = []\n lines.append(\"------- {0} Object -------\".format(self.__class__.__name__))\n lines.append(\"fid_left_shift : \" + str(self.fid_left_shift))\n lines.append(\"gaussian_apodization : \" + str(self.gaussian_apodization))\n lines.append(\"apply_peak_shift : \" + str(self.apply_peak_shift))\n lines.append(\"reference_peak_center : \" + str(self.reference_peak_center))\n lines.append(\"peak_search_width : \" + str(self.peak_search_width))\n lines.append(\"fid_left_shift_b0 : \" + str(self.fid_left_shift_b0))\n lines.append(\"apply_phase0 : \" + str(self.apply_phase0))\n 
lines.append(\"phase0_range_start : \" + str(self.phase0_range_start))\n lines.append(\"phase0_range_end : \" + str(self.phase0_range_end))\n lines.append(\"fid_left_shift_phase0 : \" + str(self.fid_left_shift_phase0))\n lines.append(\"ref_spectrum_source : \" + str(self.ref_spectrum_source))\n lines.append(\"ref_peak_line_width : \" + str(self.ref_peak_line_width))\n lines.append(\"constant_phase0_offset : \" + str(self.constant_phase0_offset))\n return '\\n'.join(lines)\n\n\n def deflate(self, flavor=Deflate.ETREE):\n if flavor == Deflate.ETREE:\n e = Element(\"settings\", {\"version\" : self.XML_VERSION})\n\n util_xml.TextSubElement(e, \"fid_left_shift\", self.fid_left_shift)\n util_xml.TextSubElement(e, \"gaussian_apodization\", self.gaussian_apodization)\n util_xml.TextSubElement(e, \"global_phase1\", self.global_phase1)\n util_xml.TextSubElement(e, \"apply_peak_shift\", self.apply_peak_shift)\n util_xml.TextSubElement(e, \"reference_peak_center\", self.reference_peak_center)\n util_xml.TextSubElement(e, \"peak_search_width\", self.peak_search_width)\n util_xml.TextSubElement(e, \"fid_left_shift_b0\", self.fid_left_shift_b0)\n util_xml.TextSubElement(e, \"apply_phase0\", self.apply_phase0)\n util_xml.TextSubElement(e, \"phase0_range_start\", self.phase0_range_start)\n util_xml.TextSubElement(e, \"phase0_range_end\", self.phase0_range_end)\n util_xml.TextSubElement(e, \"fid_left_shift_phase0\", self.fid_left_shift_phase0)\n util_xml.TextSubElement(e, \"ref_spectrum_source\", self.ref_spectrum_source)\n util_xml.TextSubElement(e, \"ref_peak_line_width\", self.ref_peak_line_width)\n util_xml.TextSubElement(e, \"constant_phase0_offset\", self.constant_phase0_offset)\n\n return e\n \n elif flavor == Deflate.DICTIONARY:\n return self.__dict__.copy()\n\n\n def inflate(self, source):\n if hasattr(source, \"makeelement\"):\n # Quacks like an ElementTree.Element\n\n for name in (\"reference_peak_center\", \n \"gaussian_apodization\", \n \"peak_search_width\", \n 
\"global_phase1\", \n 'phase0_range_start', \n 'phase0_range_end'):\n item = source.findtext(name)\n if item is not None:\n setattr(self, name, float(item))\n\n for name in (\"fid_left_shift\", \n \"fid_left_shift_b0\",\n \"fid_left_shift_phase0\", \n \"ref_peak_line_width\",\n \"constant_phase0_offset\"):\n item = source.findtext(name)\n if item is not None:\n setattr(self, name, int(item))\n\n for name in (\"apply_peak_shift\", \n \"apply_phase0\", ):\n item = source.findtext(name)\n if item is not None:\n setattr(self, name, util_xml.BOOLEANS[item])\n\n for name in (\"ref_spectrum_source\",):\n item = source.findtext(name)\n if item is not None:\n setattr(self, name, item)\n\n\n elif hasattr(source, \"keys\"):\n # Quacks like a dict\n for key in list(source.keys()):\n if hasattr(self, key):\n setattr(self, key, source[key])\n\n\n\n\nclass BlockPrepMegalaser(block_prep_identity.BlockPrepIdentity):\n \"\"\" \n Building block to hold the state of a step in an MRS processing chain.\n Includes the functionality to save/recall this object to/from an XML node.\n\n Contains inputs/results for preprocessing of the raw data from the previous\n block ('raw') in the dataset.blocks list. This step modifies coil/average\n data into a single summed FID array for one dataset.\n \n \"\"\"\n XML_VERSION = \"1.0.0\"\n \n def __init__(self, attributes=None):\n \"\"\"\n Block objects have a self.set attribute that contains a _Settings object\n that contains the input attributes for the processing done in the Chain.\n This simplifies using a Block object as a preset. 
Results from the Chain\n are stored in this object at the level of self.set\n\n Base class sets references to: self.id, self.data, self.chain and self.behave_as_preset\n \n \"\"\"\n super().__init__(attributes) \n \n # processing parameters\n self.set = _Settings()\n\n # results storage\n self.frequency_shift = None\n self.phase_0 = None\n self.data = None\n \n if attributes is not None:\n self.inflate(attributes)\n\n self.chain = None\n\n\n ##### Standard Methods and Properties #####################################\n\n\n @property\n def dims(self):\n \"\"\"Data dimensions in a list, read only.\"\"\"\n return list(self.data.shape[::-1]) if self.data is not None else None\n \n\n def __str__(self):\n lines = []\n lines.append(\"------- {0} Object -------\".format(self.__class__.__name__))\n lines.append(\"\\n\")\n lines += _Settings.__str__(self).split('\\n')\n lines.append(\"\\n\")\n lines.append(\"------- Main Object -------\")\n lines.append(\"Data shape : %s\" % str(self.dims))\n\n return '\\n'.join(lines)\n\n\n def create_chain(self, dataset):\n self.chain = chain_prep_megalaser.ChainPrepMegalaser(dataset, self)\n\n\n def set_dims(self, dataset):\n \"\"\"\n Given a Dataset object, this is an opportunity for this block object \n to ensure that its dims match those of the parent dataset. 
\n \"\"\"\n block.Block.set_dims(self, dataset)\n\n # local reference to input data\n raw = dataset.get_source_data('prep')\n\n # this is the calculated proper size for self.data\n fidsum_dims = [raw.shape[-1],1,1,1]\n\n if not self.dims or self.dims != fidsum_dims: \n self._reset_dimensional_data(dataset)\n\n\n def _reset_dimensional_data(self, dataset):\n \"\"\"\n Resets (to zero) and resizes dimensionally-dependent data\n \n \"\"\"\n # local reference to input data\n raw = dataset.get_source_data('prep')\n\n n_fids = raw.shape[-2]\n\n self.frequency_shift = np.zeros([n_fids])\n self.phase_0 = np.zeros([n_fids])\n\n self.data = np.zeros((1,1,1,raw.shape[-1]), dtype=raw.dtype)\n if self.chain is not None:\n self.chain.reset_results_arrays()\n \n \n def concatenate(self, new):\n raise NotImplementedError\n\n\n def deflate(self, flavor=Deflate.ETREE):\n if flavor == Deflate.ETREE:\n\n e = Element(\"block_prep_megalaser\",{\"id\" : self.id,\n \"version\" : self.XML_VERSION})\n\n util_xml.TextSubElement(e, \"behave_as_preset\", self.behave_as_preset)\n \n # Now I deflate the attribs that are specific to this class\n e.append(self.set.deflate())\n \n if not self.behave_as_preset:\n\n e.append(util_xml.numpy_array_to_element(self.frequency_shift,'frequency_shift'))\n e.append(util_xml.numpy_array_to_element(self.phase_0,'phase_0'))\n e.append(util_xml.numpy_array_to_element(self.data, 'data'))\n\n return e\n\n elif flavor == Deflate.DICTIONARY:\n return self.__dict__.copy()\n\n\n def inflate(self, source):\n if hasattr(source, \"makeelement\"):\n\n val = source.findtext(\"behave_as_preset\") # default is False\n if val is not None:\n self.behave_as_preset = util_xml.BOOLEANS[val]\n\n # Quacks like an ElementTree.Element\n self.set = _Settings(source.find(\"settings\"))\n\n if not self.behave_as_preset:\n \n # Now I inflate the attribs that are specific to this class\n temp = source.find(\"frequency_shift\")\n self.frequency_shift = 
util_xml.element_to_numpy_array(temp)\n temp = source.find(\"phase_0\")\n self.phase_0 = util_xml.element_to_numpy_array(temp)\n temp = source.find(\"data\")\n self.data = util_xml.element_to_numpy_array(temp)\n\n\n elif hasattr(source, \"keys\"):\n # Quacks like a dict\n for key in list(source.keys()):\n if key == \"set\":\n setattr(self, key, source[key])\n\n\n ##### Private Methods #####################################\n\n \n \n \n \n \n",
"# Python modules\n\n# 3rd party modules\nimport numpy as np\n\n# Our modules\nimport vespa.analysis.functors.funct_spectral_all as funct_spectral_all\nfrom vespa.analysis.chain_base import Chain\n\n\n\nclass ChainSpectral(Chain):\n \"\"\"\n Building block object used to create a processing chain for MRS data.\n\n Processes coil-combined, averaged FID data into frequency domain. Applies\n (optionally) ECC, water removal, phase, dc, apodization, left-shift.\n\n \"\"\"\n\n def __init__(self, dataset, block):\n \"\"\"\n Chain objects organize Algo (algorithm) calls by setting up access to\n input data and parameters, and creating standard output values for View.\n\n Base class sets convenience references to: self._block and self._dataset\n\n self.data is always initialized as []\n\n \"\"\"\n super().__init__(dataset, block)\n\n self.raw_dims = self._dataset.raw_dims\n self.raw_dim0 = self._dataset.raw_dims[0]\n self.raw_hpp = self._dataset.raw_hpp\n \n # processing functor - provides entry points for chain\n self.functor_all = funct_spectral_all.do_processing_all\n\n self.reset_results_arrays()\n\n\n\n def reset_results_arrays(self):\n \"\"\"\n A separate method so it can be called outside __init__. 
Should\n create/set enough results to keep View happy if run() fails.\n\n \"\"\"\n spectral_dim0 = self._dataset.spectral_dims[0]\n if len(self.data) != spectral_dim0:\n self.pre_roll = np.zeros(self.raw_dim0, complex)\n self.kodata = np.zeros(self.raw_dim0, complex)\n self.freq = np.zeros(spectral_dim0, complex)\n self.data = np.zeros(spectral_dim0, complex)\n\n self.time_fids = np.zeros((20,self.raw_dim0), complex)\n self.sum_time_fids = np.zeros(self.raw_dim0, complex)\n\n self.svd_data = np.zeros(spectral_dim0, complex)\n self.svd_peaks_checked = np.zeros((20,spectral_dim0), complex)\n self.svd_fids_all = np.zeros((20,self.raw_dim0), complex)\n self.svd_peaks_checked_sum = np.zeros(spectral_dim0, complex)\n \n\n\n def run(self, voxels, entry='all'):\n \"\"\"\n Run is typically called every time a processing setting is changed\n in the parent (block) object. Run processes a single voxel at a time.\n\n This object maintains previous run() results values until next run().\n This allows the View to update without having to re-run the pipeline.\n\n The 'entry' keyword adds flexibility to Block-Chain-View relationship.\n\n \"\"\"\n\n # Get 'global' parameters, that DO NOT change with voxel, from Dataset\n # - these processing/data parameters have to be updated at run time \n self.spectral_dims = self._dataset.spectral_dims\n self.spectral_dim0 = self._dataset.spectral_dims[0]\n self.spectral_hpp = self._dataset.spectral_hpp\n self.zero_fill_multiplier = self._dataset.zero_fill_multiplier\n self.phase_1_pivot = self._dataset.phase_1_pivot\n \n for voxel in voxels:\n # local copy of input data\n self.data = self._dataset.get_source_data('spectral')\n self.data = self.data[voxel[2],voxel[1],voxel[0],:]\n self.data = self.data.copy()\n\n # copy 'global' parameters, that DO change with voxel, from Dataset\n self.frequency_shift = self._dataset.get_frequency_shift(voxel)\n self.phase0 = self._dataset.get_phase_0(voxel)\n self.phase1 = self._dataset.get_phase_1(voxel)\n \n 
# copy block parameters, that DO change with voxel, from Block\n svd_output = self._block.get_svd_output(voxel)\n\n self.ndp = self._block.get_data_point_count(voxel)\n self.nssv = self._block.get_signal_singular_value_count(voxel)\n self.do_fit = self._block.get_do_fit(voxel)\n self.svd_output = svd_output\n self.voxel = voxel\n\n # select the chain processing functor based on the entry point\n if entry == 'all':\n self.functor_all(self)\n else:\n print('oooops!')\n\n # save data and parameter results into the Block results arrays\n self._block.data[voxel[2],voxel[1],voxel[0],:] = self.freq.copy()\n self._block.set_svd_output(self.svd_output, voxel)\n self._block.set_do_fit(self.do_fit, voxel)\n\n # Return values specific to calling Tab that contains this Block.Chain\n # Used to update its self.view (plot_panel_spectrum object).\n\n plot_results = { 'svd_data' : self.svd_data.copy(),\n 'svd_peaks_checked' : self.svd_peaks_checked.copy(),\n 'svd_peaks_checked_sum' : self.svd_peaks_checked_sum.copy(),\n 'svd_fids_checked_sum' : self.svd_fids_checked.copy(),\n 'freq' : self.freq.copy() }\n \n return plot_results\n"
] |
[
[
"numpy.abs",
"numpy.fft.fft",
"numpy.arange",
"numpy.roll",
"numpy.exp",
"numpy.zeros",
"numpy.sum"
],
[
"numpy.abs",
"numpy.arange",
"numpy.max",
"numpy.argmax",
"numpy.argsort"
],
[
"numpy.zeros"
],
[
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
adamjorr/kbbq-py
|
[
"a1b6049458ec03d305c4f4148aad325a3867d627"
] |
[
"tests/test_recalibrate.py"
] |
[
"import pytest\nfrom test_compare_reads import FakeRead, bamread_to_fakeread\nimport kbbq.main\nfrom kbbq import recalibrate\nfrom kbbq import compare_reads\nimport pysam\nimport numpy as np\n\ndef test_find_corrected_sites(simple_fastq_reads):\n for r in simple_fastq_reads:\n r2 = pysam.FastxRecord(name = r.name, sequence = r.sequence, quality = r.quality)\n edited_seq = list(r2.sequence)\n edited_seq[5] = 'C'\n r2.sequence = ''.join(edited_seq)\n correct = np.zeros(len(edited_seq), dtype = np.bool)\n correct[5] = True\n assert np.array_equal(recalibrate.find_corrected_sites(r,r2), correct)\n\[email protected]()\ndef uncorr_and_corr_fastq_files(tmp_path):\n uncorr_fastq = tmp_path / 'uncorr.fq'\n corrected_fastq = tmp_path / 'corr.fq'\n with open(uncorr_fastq,'w') as fu, open(corrected_fastq,'w') as fc:\n r = pysam.FastxRecord(\n name = 'foo',\n sequence = 'ATG',\n quality = '((#') #7, 7, 2\n r2 = pysam.FastxRecord(\n name = r.name,\n sequence = 'ACG',\n quality = r.quality)\n fu.write(str(r))\n fc.write(str(r2))\n return str(uncorr_fastq), str(corrected_fastq)\n\[email protected]()\ndef uncorr_and_corr_with_rg(tmp_path):\n uncorr_fastq = tmp_path / 'uncorr_withrg.fq'\n corrected_fastq = tmp_path / 'corr_withrg.fq'\n with open(uncorr_fastq,'w') as fu, open(corrected_fastq,'w') as fc:\n r = pysam.FastxRecord(\n name = 'foo/1_RG:Z:bar',\n sequence = 'ATG',\n quality = '((#') #7, 7, 2\n r2 = pysam.FastxRecord(\n name = r.name,\n sequence = 'ACG',\n quality = r.quality)\n fu.write(str(r))\n fc.write(str(r2))\n return str(uncorr_fastq), str(corrected_fastq)\n\ndef test_fastq_to_covariate_arrays(uncorr_and_corr_fastq_files, uncorr_and_corr_with_rg):\n correct_pos_errs = np.zeros((1,43,6))\n correct_pos_total = np.zeros((1,43,6))\n correct_pos_errs[0,7,1] = 1\n correct_pos_total[0,7,0] = 1\n correct_pos_total[0,7,1] = 1\n correct_dinuc_errs = np.zeros((1,43,16))\n correct_dinuc_total = np.zeros((1,43,16))\n correct_dinuc_errs[0, 7, 
compare_reads.Dinucleotide.dinuc_to_int['AT']] = 1\n correct_dinuc_total[0, 7, compare_reads.Dinucleotide.dinuc_to_int['AT']] = 1\n correct_vectors = [np.array([6]), #meanq\n np.array([1]), #rg\n np.array([2]), #rg\n np.array([[0,0,0,0,0,0,0,1] + [0] * 35]), #q\n np.array([[0,0,0,0,0,0,0,2] + [0] * 35]), #q\n correct_pos_errs, #pos\n correct_pos_total, #pos\n correct_dinuc_errs, #dinuc\n correct_dinuc_total] #diunc\n\n for a,b in zip(correct_vectors, recalibrate.fastq_to_covariate_arrays(\n uncorr_and_corr_fastq_files)):\n assert np.array_equal(a,b)\n for a,b in zip(correct_vectors, recalibrate.fastq_to_covariate_arrays(\n uncorr_and_corr_with_rg, infer_rg = True)):\n assert np.array_equal(a,b)\n\n#this read is used below\ncorrect_read = pysam.FastxRecord(\n name = 'foo',\n sequence = 'ATG',\n quality = '\\'\\'#') #6, 6, 2\n\ncorrect_read_with_rg = pysam.FastxRecord(\n name = 'foo/1_RG:Z:bar',\n sequence = 'ATG',\n quality = '\\'\\'#')\n\ndef test_recalibrate_fastq(uncorr_and_corr_fastq_files, uncorr_and_corr_with_rg, capfd):\n recalibrate.recalibrate_fastq(uncorr_and_corr_fastq_files)\n captured = capfd.readouterr()\n assert captured.out == str(correct_read) + '\\n'\n\n #now test with infer_rg = True\n recalibrate.recalibrate_fastq(uncorr_and_corr_with_rg, infer_rg = True)\n captured = capfd.readouterr()\n assert captured.out == str(correct_read_with_rg) + '\\n'\n\n #TODO: we may want test 1000x this read to see a more realistic example\n\ndef test_recalibrate_bam():\n with pytest.raises(NotImplementedError):\n recalibrate.recalibrate_bam(None)\n\ndef test_recalibrate(uncorr_and_corr_fastq_files, capfd):\n recalibrate.recalibrate(bam = None, fastq = uncorr_and_corr_fastq_files)\n captured = capfd.readouterr()\n assert captured.out == str(correct_read) + '\\n'\n\n with pytest.raises(NotImplementedError):\n recalibrate.recalibrate(fastq = None, bam = 'foo')\n\n with pytest.raises(NotImplementedError):\n recalibrate.recalibrate(fastq = None, bam = None, gatkreport = 
'foo')\n\n with pytest.raises(ValueError):\n recalibrate.recalibrate(fastq = None, bam = None, gatkreport = None)\n\ndef test_recalibrate_main(uncorr_and_corr_fastq_files, monkeypatch, capfd):\n import sys\n with monkeypatch.context() as m:\n m.setattr(sys, 'argv', [sys.argv[0]] + [\"recalibrate\",'-f'] + list(uncorr_and_corr_fastq_files) )\n kbbq.main.main()\n captured = capfd.readouterr()\n assert captured.out == str(correct_read) + '\\n'\n\n with pytest.raises(NotImplementedError), monkeypatch.context() as m:\n m.setattr(sys, 'argv', [sys.argv[0]] + [\"recalibrate\",'-b', 'foo'])\n kbbq.main.main()\n\n with pytest.raises(NotImplementedError), monkeypatch.context() as m:\n m.setattr(sys, 'argv', [sys.argv[0]] + [\"recalibrate\",'-b', 'foo', '-g', 'bar'])\n kbbq.main.main()\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.array_equal"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
kigesui/open-covid-19-data
|
[
"d40d86fc2c8f53462677f19e86f9f84f0810ca52"
] |
[
"tests/test_exported_data.py"
] |
[
"#!/usr/bin/python\n#\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport os\nimport pandas as pd\nimport sys\nimport streamlit as st\n\nPIPELINE_DIR = os.path.join(os.path.abspath(os.path.join(os.path.dirname(__file__), '../')), 'src/pipeline')\n\nsys.path.append(PIPELINE_DIR)\n\nimport path_utils\n\nAGGREGATED_EXPORT_FILES = ['cc_by/aggregated_cc_by.csv',\n 'cc_by_sa/aggregated_cc_by_sa.csv',\n 'cc_by_nc/aggregated_cc_by_nc.csv']\n\ndef test_location_and_date_unique():\n for f in AGGREGATED_EXPORT_FILES:\n export_path = os.path.join(path_utils.path_to('export_dir'), f)\n exported_df = pd.read_csv(export_path)\n duplicates = exported_df[exported_df[['open_covid_region_code', 'date']].duplicated(keep=False)]\n duplicate_info = duplicates[['open_covid_region_code', 'date']]\n print(duplicate_info)\n assert duplicates.shape[0] == 0\n\ntest_location_and_date_unique()\n"
] |
[
[
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
skyyuntian/Giotto
|
[
"6a107ef568161f5d2e65184a72b3b699ed918d33"
] |
[
"inst/python/reader.py"
] |
[
"#!/usr/bin/python\nfrom smfishHmrf.HMRFInstance import HMRFInstance\nfrom smfishHmrf.DatasetMatrix import DatasetMatrix, DatasetMatrixSingleField, DatasetMatrixMultiField\nfrom smfishHmrf.spatial import rank_transform_matrix, calc_silhouette_per_gene\nimport sys\nimport os\nimport math\nimport subprocess\nimport numpy as np\nimport scipy\nimport scipy.stats\nfrom scipy.stats import zscore\nfrom scipy.spatial.distance import euclidean, squareform, pdist\nimport smfishHmrf.reader as reader\nimport pandas as pd\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom smfishHmrf.bias_correction import calc_bias_moving, do_pca, plot_pca\nfrom scipy.cluster.vq import kmeans2\nimport argparse\n\ndef read_centroid(n, cells):\n\tmap_cell = {}\n\tfor ind,val in enumerate(cells):\n\t\tmap_cell[val] = ind\n\n\tf = open(n)\n\tnum_cell = 0\n\tfor l in f:\n\t\tl = l.rstrip(\"\\n\")\n\t\tnum_cell+=1\n\tf.close()\n\tXcen = np.empty((num_cell, 2), dtype=\"float32\")\n\tfield = np.empty((num_cell), dtype=\"int32\")\n\tf = open(n)\n\tfor l in f:\n\t\tl = l.rstrip(\"\\n\")\n\t\tll = l.split()\n\t\tx1, x2 = float(ll[0]), float(ll[1])\n\t\tt_id = map_cell[ll[-1]]\n\t\t#t_id = int(ll[-1].split(\"_\")[1]) - 1\n\t\tXcen[t_id, :] = [x1, x2]\n\t\tfield[t_id] = 100\n\tf.close()\n\treturn Xcen, field\n\ndef read_graph(n, cells):\n\tmap_cell = {}\n\tfor ind,val in enumerate(cells):\n\t\tmap_cell[val] = ind\n\n\tf = open(n)\n\tedges = set([])\n\tfor l in f:\n\t\tl = l.rstrip(\"\\n\")\n\t\tll = l.split(\"\\t\")\n\t\te1, e2 = ll\n\t\te1_id = map_cell[e1]\n\t\te2_id = map_cell[e2]\n\t\t#e1_id = int(e1.split(\"_\")[1])-1\n\t\t#e2_id = int(e2.split(\"_\")[1])-1\n\t\tedges.add(tuple(sorted([e1_id, e2_id])))\n\tf.close()\n\treturn edges\n\ndef read_expression_classic(n):\n\tf = open(n)\n\th = f.readline().rstrip(\"\\n\").split()\n\tnum_cell = len(h)\n\tnum_gene = 0\n\tfor l in f:\n\t\tl = l.rstrip(\"\\n\")\n\t\tll = l.split()\n\t\t#gene = 
ll[0]\n\t\tnum_gene+=1\n\tf.close()\n\tmat = np.empty((num_gene, num_cell), dtype=\"float32\")\n\tgenes = []\n\tcells = h\n\tf = open(n)\n\tf.readline()\n\tgid = 0\n\tfor l in f:\n\t\tl = l.rstrip(\"\\n\")\n\t\tll = l.split()\n\t\tgenes.append(ll[0])\n\t\tmat[gid, :] = [float(v) for v in ll[1:]]\n\t\tgid+=1\n\tf.close()\n\treturn mat, genes, cells\n\ndef connected_components(edges, adjacent, points):\n\tvisited = {}\n\tchains = []\n\tfor p in points:\n\t\tvisited[p] = False\n\tfor p in points:\n\t\tif visited[p]==False:\n\t\t\tnew_chain = []\n\t\t\tvisited, new_chain = DFS(p, adjacent, visited, new_chain)\n\t\t\tchains.append(new_chain)\n\treturn chains\n\ndef DFS(p, adjacent, visited, new_chain):\n\tvisited[p] = True\n\tnew_chain.append(p)\n\tfor nei in adjacent[p]:\n\t\tif visited[nei]==False:\n\t\t\tvisited, new_chain = DFS(nei, adjacent, visited, new_chain)\n\treturn visited, new_chain\n\nif __name__==\"__main__\":\n\tparser = argparse.ArgumentParser(description=\"HMRF.\", formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\tparser.add_argument(\"-l\", \"--location\", dest=\"location\", type=str, required=True)\n\tparser.add_argument(\"-g\", \"--genes\", dest=\"genes\", type=str, required=True)\n\tparser.add_argument(\"-n\", \"--network\", dest=\"network\", type=str, required=True)\n\tparser.add_argument(\"-e\", \"--expression\", dest=\"expression\", type=str, required=True)\n\tparser.add_argument(\"-o\", \"--outdir\", dest=\"outdir\", type=str, required=True)\n\tparser.add_argument(\"-a\", \"--name\", dest=\"name\", type=str, required=True)\n\t\n\tparser.add_argument(\"-k\", \"--k\", dest=\"k\", type=int, required=True)\n\tparser.add_argument(\"-b\", \"--betas\", help=\"three numbers: start_beta, beta_increment, num_beta (e.g. 
0 2.0 50)\", nargs=3, dest=\"betas\", type=float, required=True)\n\tparser.add_argument(\"-t\", \"--tolerance\", dest=\"tolerance\", type=float, help=\"tolerance value\", default=1e-10)\n\tparser.add_argument(\"-z\", \"--zscore\", type=str, dest=\"zscore\", choices=[\"rowcol\", \"colrow\", \"none\"], default=\"none\", help=\"zscore the matrix after subsetting to spatial genes. Rowcol: row(gene) first, column(cell) next.\")\n\tparser.add_argument(\"-i\", \"--numinit\", type=int, dest=\"num_init\", default=100, help=\"number of initializations\")\n\n\targs = parser.parse_args()\n\n\tsys.setrecursionlimit(50000)\n\t#print args\n\t#sys.exit(0)\t\n\t\n\tmat, genes, cells = read_expression_classic(args.expression)\n\tprint(\"Done reading expression\")\n\tXcen, field = read_centroid(args.location, cells)\n\tprint(\"Done reading location\")\n\n\t#mat = pd.read_table(args.expression, sep=\" \", header=0, index_col=0)\n\t\n\t#print mat.index\n\t'''\n\tgenes = []\n\tfor g in range(mat.index.shape[0]):\n\t\tgenes.append(str(mat.index[g]))\n\t#print genes\n\texpr = np.copy(mat.values)\n\t'''\n\tgenes_good = reader.read_genes(args.genes)\n\texpr = mat\n\t\n\n\tnew_dset = DatasetMatrixSingleField(expr, genes, None, Xcen)\t\n\tedges = read_graph(args.network, cells)\n\tprint(\"Done reading graph\")\n\tpoints = set([])\n\tadjacent = {}\n\tfor e1,e2 in edges:\n\t\tpoints.add(e1)\n\t\tpoints.add(e2)\n\tncell = expr.shape[1]\n\tngene = expr.shape[0]\n\t#print ncell, ngene\n\n\t'''\t\n\tdist = pdist(Xcen, metric=\"euclidean\")\n\tdist = squareform(dist)\n\tfor i in range(ncell):\n\t\tif i in points: continue\n\t\tdist_i = sorted([(dist[i,j],j) for j in range(ncell) if i!=j])\n\t\tedges.add(tuple(sorted([i, dist_i[0][1]])))\n\t'''\n\tfor e1,e2 in edges:\n\t\tadjacent.setdefault(e1, set([]))\n\t\tadjacent.setdefault(e2, set([]))\n\t\tadjacent[e1].add(e2)\n\t\tadjacent[e2].add(e1)\n\tnew_dset.edges = edges\n\tnew_dset.adjacent = adjacent\n\n\tprint(\"Start calculating independent 
regions\")\n\tconn = connected_components(edges, adjacent, points)\n\n\tblocks = {}\n\tfor ind_con,con in enumerate(conn):\n\t\tall_vert = con\n\t\tset_all_vert = set(all_vert)\n\t\tmap_vert = {}\n\t\tfor ind,val in enumerate(all_vert):\n\t\t\tmap_vert[val] = ind\n\t\tprint(\"Edges for component\", ind_con)\n\t\tedge_file = \"/tmp/edges.txt\"\n\t\tblock_file = \"/tmp/blocks.txt\"\n\t\tfw = open(edge_file, \"w\")\n\t\tfor e1, e2 in edges:\n\t\t\tif e1 in set_all_vert and e2 in set_all_vert:\n\t\t\t\tfw.write(\"%d %d\\n\" % (map_vert[e1]+1, map_vert[e2]+1))\n\t\tfw.close()\n\t\timport smfishHmrf\n\t\tthis_path = os.path.dirname(smfishHmrf.__file__) + \"/graphColoring\"\n\t\tsubprocess.call(\"java -cp %s -Xmx32g -Xms32g GraphColoring %s %s\" % (this_path, edge_file, block_file), shell=True)\n\t\t\n\t\tf = open(block_file)\n\t\tb_ind = 0\n\t\tfor l in f:\n\t\t\tl = l.rstrip(\"\\n\")\n\t\t\tll = l.split()\n\t\t\t#self.blocks.append(int(ll[1]))\n\t\t\tblocks[all_vert[b_ind]] = int(ll[1])\n\t\t\tb_ind+=1\n\t\tf.close()\n\t\t#self.blocks = np.array(self.blocks)\n\n\tnew_blocks = []\n\tfor b in range(0, len(blocks.keys())):\n\t\tnew_blocks.append(blocks[b])\n\tnew_dset.blocks = np.array(new_blocks)\n\tprint(\"Finished calculating independent regions\")\n\n\n\t'''\n\tprint(\"Start calculating independent region\")\n\tnew_dset.calc_independent_region()\n\tprint(\"Finished calculating independent region\")\n\t'''\n\n\tt_dset = new_dset.subset_genes(genes_good)\n\n\tif args.zscore==\"colrow\":\n\t\tt_dset.expr = zscore(t_dset.expr, axis=0) #per column (cell)\n\t\tt_dset.expr = zscore(t_dset.expr, axis=1) #per row (gene)\n\telif args.zscore==\"rowcol\":\n\t\tt_dset.expr = zscore(t_dset.expr, axis=1) #per row (gene)\n\t\tt_dset.expr = zscore(t_dset.expr, axis=0) #per col (cell)\n\t\t\n\toutdir = args.outdir\n\tst_beta, incr_beta, num_beta = args.betas\n\tst_beta = float(st_beta)\n\tincr_beta = float(incr_beta)\n\tnum_beta = int(num_beta)\n\tif not 
os.path.isdir(outdir):\n\t\tos.mkdir(outdir)\n\tthis_hmrf = HMRFInstance(args.name, outdir, t_dset, args.k, st_beta, incr_beta, num_beta, tolerance=args.tolerance)\n\tthis_hmrf.init(nstart=args.num_init)\n\tthis_hmrf.run()\n"
] |
[
[
"numpy.array",
"scipy.stats.zscore",
"numpy.empty"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
fzalkow/PCP
|
[
"45f705fa5b6e86eb6fe1269221979e5cb1d4fa57"
] |
[
"LibPCP/complex.py"
] |
[
"\"\"\"\nSource: PCP Notebooks (https://www.audiolabs-erlangen.de/PCP)\nModule: LibPCP.complex\nAuthor: Meinard Mueller, International Audio Laboratories Erlangen\nLicense: The MIT license, https://opensource.org/licenses/MIT\n\"\"\"\nimport os\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib.colors import LinearSegmentedColormap\n\n\ndef generate_figure(figsize=(2, 2), xlim=[0, 1], ylim=[0, 1]):\n \"\"\"Generate figure for plotting complex numbers\n Notebook: PCP_complex.ipynb\"\"\"\n plt.figure(figsize=figsize)\n plt.grid()\n plt.xlim(xlim)\n plt.ylim(ylim)\n plt.xlabel('$\\mathrm{Re}$')\n plt.ylabel('$\\mathrm{Im}$')\n\n\ndef plot_vector(c, color='k', start=0, linestyle='-'):\n \"\"\"Plot arrow corresponding to difference of two complex numbers\n\n Notebook: PCP_complex.ipynb\n\n Args:\n c: Complex number\n color: Color of arrow\n start: Complex number encoding the start position\n linestyle: Linestyle of arrow\n\n Returns:\n plt.arrow: matplotlib.patches.FancyArrow\n \"\"\"\n return plt.arrow(np.real(start), np.imag(start), np.real(c), np.imag(c),\n linestyle=linestyle, head_width=0.05,\n fc=color, ec=color, overhang=0.3, length_includes_head=True)\n\n\ndef plot_polar_vector(c, label=None, color=None, start=0, linestyle='-'):\n \"\"\"Plot arrow in polar plot\n Notebook: PCP_complex.ipynb\"\"\"\n # plot line in polar plane\n line = plt.polar([np.angle(start), np.angle(c)], [np.abs(start), np.abs(c)], label=label,\n color=color, linestyle=linestyle)\n # plot arrow in same color\n this_color = line[0].get_color() if color is None else color\n plt.annotate('', xytext=(np.angle(start), np.abs(start)), xy=(np.angle(c), np.abs(c)),\n arrowprops=dict(facecolor=this_color, edgecolor='none',\n headlength=12, headwidth=10, shrink=1, width=0))\n\n\ndef exercise_complex(show_result=True):\n \"\"\"Exercise 1: Rotate Complex Number\n Notebook: PCP_complex.ipynb\"\"\"\n if show_result is False:\n return\n\n c_abs = 1.2\n c_angle = 20 # in degree\n 
c_angle_rad = np.deg2rad(c_angle)\n a = c_abs * np.cos(c_angle_rad)\n b = c_abs * np.sin(c_angle_rad)\n c = a + b*1j\n c_conj = np.conj(c)\n c_inv = 1 / c\n generate_figure(figsize=(5, 2.5), xlim=[-0.25, 1.75], ylim=[-0.5, 0.5])\n v1 = plot_vector(c, color='k')\n v2 = plot_vector(c_conj, color='b')\n v3 = plot_vector(c_inv, color='r')\n plt.legend([v1, v2, v3], ['$c$', r'$\\overline{c}$', '$c^{-1}$'])\n\n def rotate_complex(c, r):\n c_angle_rad = np.angle(c) - np.deg2rad(r)\n c_abs = np.abs(c)\n a = c_abs * np.cos(c_angle_rad)\n b = c_abs * np.sin(c_angle_rad)\n c_rot = a + b*1j\n return c_rot\n\n c = 1 + 0.5*1j\n generate_figure(figsize=(5, 2.5), xlim=[-0.25, 1.75], ylim=[-0.25, 0.75])\n v1 = plot_vector(c, color='k')\n v2 = plot_vector(rotate_complex(c, 10), color='b')\n v3 = plot_vector(rotate_complex(c, 20), color='g')\n v4 = plot_vector(rotate_complex(c, 30), color='r')\n plt.legend([v1, v2, v3, v4], ['$c$', '$r=10$', '$r=20$', '$r=30$'])\n\n\ndef exercise_polynomial(show_result=True):\n \"\"\"Exercise 2: Roots of Polynomial\n Notebook: PCP_complex.ipynb\"\"\"\n if show_result is False:\n return\n\n def vis_root(p, ax, title=''):\n poly_root = np.roots(p)\n ax.scatter(np.real(poly_root), np.imag(poly_root), color='red')\n ax.grid()\n ax.set_title(title)\n ax.set_xlabel('$\\mathrm{Re}$')\n ax.set_ylabel('$\\mathrm{Im}$')\n\n fig, ax = plt.subplots(2, 3, figsize=(10, 6))\n\n p = np.array([1, 0, -2])\n vis_root(p, ax[0, 0], title='$p(z)=z^2-2$')\n\n p = np.array([1, 0, 2])\n vis_root(p, ax[0, 1], title='$p(z)=z^2+2$')\n\n p = np.array([1, 0, 0, 0, 0, 0, 0, 0, -1])\n vis_root(p, ax[0, 2], '$p(z)=z^8-1$')\n\n p = np.array([1, 1, 1, 0, 0, 0, 0, 0, 0])\n vis_root(p, ax[1, 0], '$p(z)=z^8 + z^7 + z^5$')\n\n p = np.array([1, 1, 1, 0, 0, 0, 0, 0, 0.000001])\n vis_root(p, ax[1, 1], '$p(z)=z^8 + z^7 + z^5 + 0.000001$')\n\n p = np.array([1, -2j, 2 + 4j, 3])\n vis_root(p, ax[1, 2], '$p(z)=z^3 -2iz^2 + (2+4i)z + 3 $')\n\n plt.tight_layout()\n\n\ndef 
exercise_mandelbrot(show_result=True):\n \"\"\"Exercise 3: Mandelbrot Set\n Notebook: PCP_complex.ipynb\"\"\"\n if show_result is False:\n return\n\n a_min = -2\n a_max = 1\n b_min = -1.2\n b_max = 1.2\n a_delta = 0.01\n b_delta = 0.01\n\n A, B = np.meshgrid(np.arange(a_min, a_max+a_delta, a_delta),\n np.arange(b_min, b_max+b_delta, b_delta))\n M = A.shape[0]\n N = A.shape[1]\n C = A + B*1j\n\n iter_max = 50\n thresh = 100\n mandel = np.ones((M, N))\n\n for m in range(M):\n for n in range(N):\n c = C[m, n]\n z = 0\n for k in range(iter_max):\n z = z * z + c\n if np.abs(z) > thresh:\n mandel[m, n] = 0\n break\n\n plt.figure(figsize=(6, 4))\n extent = [a_min, a_max, b_min, b_max]\n plt.imshow(mandel, origin='lower', cmap='gray_r', extent=extent)\n\n\ndef exercise_mandelbrot_fancy(show_result=True, save_file=False):\n \"\"\"Exercise 3: Mandelbrot Set (more fancy version)\n Notebook: PCP_complex.ipynb\"\"\"\n if show_result is False:\n return\n\n a_min = -2\n a_max = 1\n b_min = -1.2\n b_max = 1.2\n a_delta = 0.005\n b_delta = 0.005\n\n A, B = np.meshgrid(np.arange(a_min, a_max+a_delta, a_delta),\n np.arange(b_min, b_max+b_delta, b_delta))\n M = A.shape[0]\n N = A.shape[1]\n C = A + B*1j\n\n iter_max = 100\n thresh = 1000\n mandel_iter = np.zeros((M, N))\n\n np.warnings.filterwarnings('ignore')\n Z = np.zeros((M, N))\n for k in range(iter_max):\n Z = Z * Z + C\n ind = (np.abs(Z) > thresh)\n mandel_iter[ind] = k\n Z[ind] = np.nan\n\n Z[np.isnan(Z)] = thresh\n mandel = (np.abs(Z) < thresh).astype(int)\n\n color_wb = LinearSegmentedColormap.from_list('color_wb', [[1, 1, 1, 0], [0, 0, 0, 1]], N=2)\n\n plt.figure(figsize=(8, 6))\n extent = [a_min, a_max, b_min, b_max]\n plt.imshow(np.log(np.log(mandel_iter)), origin='lower', cmap='YlOrBr_r', extent=extent)\n plt.imshow(mandel, origin='lower', cmap=color_wb, extent=extent)\n if save_file==True:\n output_path_filename = os.path.join('.', 'output', 'Mandelbrot.png')\n plt.savefig(output_path_filename)\n"
] |
[
[
"matplotlib.pyplot.legend",
"matplotlib.pyplot.imshow",
"numpy.imag",
"matplotlib.pyplot.tight_layout",
"numpy.arange",
"numpy.sin",
"numpy.roots",
"numpy.real",
"numpy.warnings.filterwarnings",
"numpy.zeros",
"matplotlib.pyplot.figure",
"numpy.log",
"numpy.isnan",
"matplotlib.pyplot.ylim",
"matplotlib.pyplot.savefig",
"numpy.deg2rad",
"matplotlib.colors.LinearSegmentedColormap.from_list",
"numpy.array",
"matplotlib.pyplot.ylabel",
"numpy.conj",
"numpy.abs",
"matplotlib.pyplot.subplots",
"numpy.cos",
"numpy.ones",
"matplotlib.pyplot.xlim",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"numpy.angle"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
matejklemen/slovene-coreference-resolution
|
[
"3dc19c03dcef244c4cc4f6e0415915455014964d"
] |
[
"src/data.py"
] |
[
"import os\nimport logging\nimport csv\nimport pandas as pd\nfrom collections import OrderedDict\n\nfrom bs4 import BeautifulSoup\n\nDUMMY_ANTECEDENT = None\n\n#####################\n# GLOBAL PARAMETERS\n#####################\n# Path \"./data/*\" assumes you are running from root folder, i.e. (python /src/baseline.py)\n# Use path \"../data/*\" if you are running from src folder, i.e. (cd src) and then (python baseline.py)\nCOREF149_DIR = os.environ.get(\"COREF149_DIR\", \"../data/coref149\")\nSENTICOREF_DIR = os.environ.get(\"SENTICOREF149_DIR\", \"../data/senticoref1_0\")\nSENTICOREF_METADATA_DIR = \"../data/senticoref_pos_stanza\"\nSSJ_PATH = os.environ.get(\"SSJ_PATH\", \"../data/ssj500k-sl.TEI/ssj500k-sl.body.reduced.xml\")\n\n\ndef _read_tokens(corpus_soup):\n \"\"\" Obtain all tokens in current document.\n\n Arguments\n ---------\n corpus_soup: bs4.element.Tag\n Wrapped XML element containing the document (<tc:TextCorpus ...> tag).\n\n Returns\n -------\n dict[str, str]:\n Mapping of token IDs to raw tokens\n \"\"\"\n id_to_tok = OrderedDict()\n for i, el in enumerate(corpus_soup.findAll(\"tc:token\")):\n token_id = el[\"id\"]\n token = el.text.strip()\n id_to_tok[token_id] = token\n return id_to_tok\n\n\ndef _read_sentences(corpus_soup):\n \"\"\" Obtain all sentences in current document.\n\n Returns\n -------\n tuple:\n (list[list[str]], dict[str, list]):\n (1.) token IDs, organized into sentences\n (2.) 
token IDs to [index of sentence, index of token inside sentence]\n \"\"\"\n sent_tok_ids = []\n tok_to_position = {}\n for idx_sent, el in enumerate(corpus_soup.findAll(\"tc:sentence\")):\n token_ids = el[\"tokenids\"].split(\" \")\n for idx_tok, tok in enumerate(token_ids):\n tok_to_position[tok] = [idx_sent, idx_tok]\n sent_tok_ids.append(token_ids)\n return sent_tok_ids, tok_to_position\n\n\ndef _read_coreference(corpus_soup):\n \"\"\" Obtain all mentions and coreference clusters in current document.\n\n Returns\n -------\n tuple:\n (dict[str, list[str]], list[list[str]]):\n (1.) mentions\n (2.) mentions organized by coreference cluster\n \"\"\"\n mentions = {}\n clusters = []\n for cluster_obj in corpus_soup.findAll(\"tc:entity\"):\n curr_cluster = []\n for mention_obj in cluster_obj.findAll(\"tc:reference\"):\n mention_id = mention_obj[\"id\"]\n mention_tokens = mention_obj[\"tokenids\"].split(\" \")\n mentions[mention_id] = mention_tokens\n curr_cluster.append(mention_id)\n\n clusters.append(curr_cluster)\n return mentions, clusters\n\n\n# Create a dictionary where each mention points to its antecedent (or the dummy antecedent)\ndef _coreference_chain(clusters_list):\n mapped_clusters = {}\n for curr_cluster in clusters_list:\n for i, curr_mention in enumerate(curr_cluster):\n mapped_clusters[curr_mention] = DUMMY_ANTECEDENT if i == 0 else curr_cluster[i - 1]\n return mapped_clusters\n\n\nclass Token:\n def __init__(self, token_id, raw_text, lemma, msd, sentence_index, position_in_sentence, position_in_document):\n self.token_id = token_id\n\n self.raw_text = raw_text\n self.lemma = lemma\n self.msd = msd\n\n self.sentence_index = sentence_index\n self.position_in_sentence = position_in_sentence\n self.position_in_document = position_in_document\n\n if msd is not None:\n self.gender = self._extract_gender(msd)\n self.number = self._extract_number(msd)\n self.category = msd[0]\n\n def __str__(self):\n return f\"Token(\\\"{self.raw_text}\\\")\"\n\n def 
_extract_number(self, msd_string):\n number = None\n if msd_string[0] == \"S\" and len(msd_string) >= 4: # noun/samostalnik\n number = msd_string[3]\n elif msd_string[0] == \"G\" and len(msd_string) >= 6: # verb/glagol\n number = msd_string[5]\n # P = adjective (pridevnik), Z = pronoun (zaimek), K = numeral (števnik)\n elif msd_string[0] in {\"P\", \"Z\", \"K\"} and len(msd_string) >= 5:\n number = msd_string[4]\n\n return number\n\n def _extract_gender(self, msd_string):\n gender = None\n if msd_string[0] == \"S\" and len(msd_string) >= 3: # noun/samostalnik\n gender = msd_string[2]\n elif msd_string[0] == \"G\" and len(msd_string) >= 7: # verb/glagol\n gender = msd_string[6]\n # P = adjective (pridevnik), Z = pronoun (zaimek), K = numeral (števnik)\n elif msd_string[0] in {\"P\", \"Z\", \"K\"} and len(msd_string) >= 4:\n gender = msd_string[3]\n\n return gender\n\n\nclass Mention:\n def __init__(self, mention_id, tokens):\n self.mention_id = mention_id\n self.tokens = tokens\n\n def __str__(self):\n return f\"Mention(\\\"{' '.join([tok.raw_text for tok in self.tokens])}\\\")\"\n\n def raw_text(self):\n return \" \".join([t.raw_text for t in self.tokens])\n\n def lemma_text(self):\n return \" \".join([t.lemma for t in self.tokens if t.lemma is not None])\n\n\nclass Document:\n def __init__(self, doc_id, tokens, sentences, mentions, clusters,\n metadata=None):\n self.doc_id = doc_id # type: str\n self.tokens = tokens # type: dict\n self.sents = sentences # type: list\n self.mentions = mentions # type: dict\n self.clusters = clusters # type: list\n self.mapped_clusters = _coreference_chain(self.clusters)\n self.metadata = metadata\n\n def raw_sentences(self):\n \"\"\" Returns list of sentences in document. 
\"\"\"\n return [list(map(lambda t: self.tokens[t].raw_text, curr_sent)) for curr_sent in self.sents]\n\n def __len__(self):\n return len(self.tokens)\n\n def __str__(self):\n return f\"Document('{self.doc_id}', {len(self.tokens)} tokens)\"\n\n\ndef sorted_mentions_dict(mentions):\n # sorted() produces an array of (key, value) tuples, which we turn back into dictionary\n sorted_mentions = dict(sorted(mentions.items(),\n key=lambda tup: (tup[1].tokens[0].sentence_index, # sentence\n tup[1].tokens[0].position_in_sentence, # start pos\n tup[1].tokens[-1].position_in_sentence))) # end pos\n\n return sorted_mentions\n\n\ndef read_senticoref_doc(file_path):\n # Temporary cluster representation:\n # {cluster1 index: { mention1_idx: ['mention1', 'tokens'], mention2_idx: [...] }, cluster2_idx: {...} }\n _clusters = {}\n # Temporary buffer for current sentence\n _curr_sent = []\n\n sents = []\n id_to_tok = {}\n tok_to_position = {}\n idx_sent, idx_inside_sent = 0, 0\n mentions, clusters = {}, []\n\n doc_id = file_path.split(os.path.sep)[-1][:-4] # = file name without \".tsv\"\n # Note: `quoting=csv.QUOTE_NONE` is required as otherwise some documents can't be read\n # Note: `keep_default_na=False` is required as there's a typo in corpus (\"NA\"), interpreted as <missing>\n curr_annotations = pd.read_table(file_path, comment=\"#\", sep=\"\\t\", index_col=False, quoting=csv.QUOTE_NONE,\n names=[\"token_index\", \"start_end\", \"token\", \"NamedEntity\", \"Polarity\",\n \"referenceRelation\", \"referenceType\"], keep_default_na=False)\n curr_metadata = pd.read_table(os.path.join(SENTICOREF_METADATA_DIR, f\"{doc_id}.tsv\"), sep=\"\\t\", index_col=False,\n quoting=csv.QUOTE_NONE, header=0, keep_default_na=False)\n\n metadata = {\"tokens\": {}}\n for i, (tok_id, ref_info, token) in enumerate(curr_annotations[[\"token_index\", \"referenceRelation\", \"token\"]].values):\n # Token is part of some mention\n if ref_info != \"_\":\n # Token can be part of multiple mentions\n 
ref_annotations = ref_info.split(\"|\")\n\n for mention_info in ref_annotations:\n cluster_idx, mention_idx = list(map(int, mention_info[3:].split(\"-\"))) # skip \"*->\"\n\n curr_mentions = _clusters.get(cluster_idx, {})\n curr_mention_tok_ids = curr_mentions.get(mention_idx, [])\n curr_mention_tok_ids.append(tok_id)\n curr_mentions[mention_idx] = curr_mention_tok_ids\n\n _clusters[cluster_idx] = curr_mentions\n\n _curr_sent.append(tok_id)\n tok_to_position[tok_id] = [idx_sent, idx_inside_sent]\n id_to_tok[tok_id] = token\n idx_inside_sent += 1\n\n text, pos_tag, lemma = curr_metadata.iloc[i].values\n metadata[\"tokens\"][tok_id] = {\"ana\": pos_tag, \"lemma\": lemma, \"text\": text}\n\n # Segment sentences heuristically\n if token in {\".\", \"!\", \"?\"}:\n idx_sent += 1\n idx_inside_sent = 0\n sents.append(_curr_sent)\n _curr_sent = []\n\n # If the document doesn't end with proper punctuation\n if len(_curr_sent) > 0:\n sents.append(_curr_sent)\n\n # --- generate token objects\n final_tokens = OrderedDict()\n for index, (tok_id, tok_raw) in enumerate(id_to_tok.items()):\n final_tokens[tok_id] = Token(\n tok_id,\n tok_raw,\n metadata[\"tokens\"][tok_id][\"lemma\"] if \"lemma\" in metadata[\"tokens\"][tok_id] else None,\n metadata[\"tokens\"][tok_id][\"ana\"].split(\":\")[1],\n tok_to_position[tok_id][0],\n tok_to_position[tok_id][1],\n index\n )\n # ---\n\n mention_counter = 0\n for idx_cluster, curr_mentions in _clusters.items():\n curr_cluster = []\n for idx_mention, mention_tok_ids in curr_mentions.items():\n # assign coref149-style IDs to mentions\n mention_id = f\"rc_{mention_counter}\"\n mention_tokens = list(map(lambda tok_id: final_tokens[tok_id], mention_tok_ids))\n mentions[mention_id] = Mention(mention_id, mention_tokens)\n\n curr_cluster.append(mention_id)\n mention_counter += 1\n clusters.append(curr_cluster)\n\n return Document(doc_id, final_tokens, sents, sorted_mentions_dict(mentions), clusters, metadata=metadata)\n\n\ndef 
read_coref149_doc(file_path, ssj_doc):\n with open(file_path, encoding=\"utf8\") as f:\n content = f.readlines()\n content = \"\".join(content)\n soup = BeautifulSoup(content, \"lxml\").find(\"tc:textcorpus\")\n\n doc_id = file_path.split(os.path.sep)[-1][:-4] # = file name without \".tcf\"\n\n # Read data as defined in coref149\n tokens = _read_tokens(soup)\n sents, tok_to_position = _read_sentences(soup)\n mentions, clusters = _read_coreference(soup)\n\n # Tokens have different IDs in ssj500k, so remap coref149 style to ssj500k style\n idx_sent_coref, idx_token_coref = 0, 0\n _coref_to_ssj = {} # mapping from coref ids to ssj ids\n for curr_sent in ssj_doc.findAll(\"s\"):\n for curr_token in curr_sent.findAll([\"w\", \"pc\"]):\n coref_token_id = sents[idx_sent_coref][idx_token_coref]\n ssj_token_id = curr_token[\"xml:id\"]\n\n # Warn in case tokenization is different between datasets (we are slightly screwed in that case)\n if curr_token.text.strip() != tokens[coref_token_id]:\n logging.warning(f\"MISMATCH! 
'{curr_token.text.strip()}' (ssj500k ID: {ssj_token_id}) vs \"\n f\"'{tokens[coref_token_id]}' (coref149 ID: {coref_token_id})\")\n\n _coref_to_ssj[coref_token_id] = ssj_token_id\n idx_token_coref += 1\n if idx_token_coref == len(sents[idx_sent_coref]):\n idx_sent_coref += 1\n idx_token_coref = 0\n\n # sentences are composed of ssj token IDs\n fixed_sents = [[_coref_to_ssj[curr_id] for curr_id in curr_sent] for curr_sent in sents]\n\n # Write all metadata for tokens\n # Note: currently not writing SRL/dependency metadata\n metadata = {\"tokens\": {}}\n for token in ssj_doc.findAll([\"w\", \"c\", \"pc\"]):\n token_id = token.get(\"xml:id\", None)\n\n if token_id:\n metadata[\"tokens\"][token_id] = token.attrs\n metadata[\"tokens\"][token_id][\"text\"] = token.text\n\n final_tokens = OrderedDict()\n for index, (coref_token_id, raw_text) in enumerate(tokens.items()):\n ssj_token_id = _coref_to_ssj[coref_token_id] # mapping of coref token ID to ssj token ID\n final_tokens[ssj_token_id] = Token(\n ssj_token_id,\n raw_text,\n metadata[\"tokens\"][ssj_token_id][\"lemma\"] if \"lemma\" in metadata[\"tokens\"][ssj_token_id] else None,\n metadata[\"tokens\"][ssj_token_id][\"ana\"].split(\":\")[1],\n tok_to_position[coref_token_id][0], # Note: tok_to_pos uses coref IDs, not ssj IDs\n tok_to_position[coref_token_id][1],\n index)\n\n final_mentions = {}\n for mention_id, mention_tokens in mentions.items():\n token_objs = [final_tokens[_coref_to_ssj[tok_id]] for tok_id in mention_tokens]\n final_mentions[mention_id] = Mention(mention_id, token_objs)\n\n # TODO: is metadata required here? 
metadata for tokens has been moved to token object\n return Document(doc_id, final_tokens, fixed_sents, sorted_mentions_dict(final_mentions), clusters, metadata=metadata)\n\n\ndef read_corpus(name):\n SUPPORTED_DATASETS = {\"coref149\", \"senticoref\"}\n if name not in SUPPORTED_DATASETS:\n raise ValueError(f\"Unsupported dataset (must be one of {SUPPORTED_DATASETS})\")\n\n if name == \"coref149\":\n with open(SSJ_PATH, encoding=\"utf8\") as ssj:\n content = ssj.readlines()\n content = \"\".join(content)\n ssj_soup = BeautifulSoup(content, \"lxml\")\n\n doc_to_soup = {}\n for curr_soup in ssj_soup.findAll(\"p\"):\n doc_to_soup[curr_soup[\"xml:id\"]] = curr_soup\n\n doc_ids = [f[:-4] for f in os.listdir(COREF149_DIR)\n if os.path.isfile(os.path.join(COREF149_DIR, f)) and f.endswith(\".tcf\")]\n return [read_coref149_doc(os.path.join(COREF149_DIR, f\"{curr_id}.tcf\"), doc_to_soup[curr_id]) for curr_id in doc_ids]\n else:\n doc_ids = [f[:-4] for f in os.listdir(SENTICOREF_DIR)\n if os.path.isfile(os.path.join(SENTICOREF_DIR, f)) and f.endswith(\".tsv\")]\n\n return [read_senticoref_doc(os.path.join(SENTICOREF_DIR, f\"{curr_id}.tsv\")) for curr_id in doc_ids]\n\n\nif __name__ == \"__main__\":\n DATASET_NAME = \"senticoref\"\n documents = read_corpus(DATASET_NAME)\n print(f\"Read {len(documents)} documents\")\n\n # http://nl.ijs.si/ME/Vault/V5/msd/html/msd-sl.html#msd.categories-sl\n if DATASET_NAME == \"senticoref\":\n # English tags - because tags are predicted with Stanza\n char_tag_to_pos = dict(zip([\"N\", \"V\", \"A\", \"R\", \"P\", \"M\", \"S\", \"C\", \"Q\", \"I\", \"Y\", \"X\", \"Z\"],\n [\"samostalnik\", \"glagol\", \"pridevnik\", \"prislov\", \"zaimek\", \"števnik\",\n \"predlog\", \"veznik\", \"členek\", \"medmet\", \"okrajšava\", \"neuvrščeno\", \"ločilo\"]))\n elif DATASET_NAME == \"coref149\":\n char_tag_to_pos = dict(zip([\"S\", \"G\", \"P\", \"R\", \"Z\", \"K\", \"D\", \"V\", \"L\", \"M\", \"O\", \"N\", \"U\"],\n [\"samostalnik\", \"glagol\", 
\"pridevnik\", \"prislov\", \"zaimek\", \"števnik\",\n \"predlog\", \"veznik\", \"členek\", \"medmet\", \"okrajšava\", \"neuvrščeno\", \"ločilo\"]))\n pos_to_idx = {c: i for i, c in enumerate(char_tag_to_pos.values())}\n pos_count = [0 for _ in range(len(pos_to_idx))]\n for doc in documents:\n for mention_id, mention in doc.mentions.items():\n first_token = mention.tokens[0] # type: Token\n curr_tag = char_tag_to_pos[first_token.msd[0]]\n\n pos_count[pos_to_idx[curr_tag]] += 1\n\n print(\"besedna_vrsta,frekvenca\")\n for curr_pos in pos_to_idx:\n print(f\"{curr_pos},{pos_count[pos_to_idx[curr_pos]]}\")\n\n entity_size_count = {} # entity/cluster size -> number of such entities\n mentions_by_documents = {} # number of mentions -> number of documents with this amount of mentions\n for doc in documents:\n num_mentions = 0\n for curr_cluster in doc.clusters:\n cluster_size = len(curr_cluster)\n num_mentions += cluster_size\n entity_size_count[cluster_size] = entity_size_count.get(cluster_size, 0) + 1\n\n mentions_by_documents[num_mentions] = mentions_by_documents.get(num_mentions, 0) + 1\n\n print(\"\\nvelikost_entitete,frekvenca\")\n for curr_size, num_mentions in sorted(entity_size_count.items(), key=lambda tup: tup[0]):\n print(f\"{curr_size},{num_mentions}\")\n\n print(\"\\nštevilo_omenitev_v_dokumentu,frekvenca\")\n for curr_num_mentions, num_docs in sorted(mentions_by_documents.items(), key=lambda tup: tup[0]):\n print(f\"{curr_num_mentions},{num_docs}\")\n\n dist_between_mentions = {} # dist between consecutive mentions (in num. 
of mentions) -> frequency of this distance\n for doc in documents:\n sorted_mentions = sorted([(mention_id,\n curr_mention.tokens[0].position_in_document,\n curr_mention.tokens[-1].position_in_document)\n for mention_id, curr_mention in doc.mentions.items()],\n key=lambda triple: (triple[1], triple[2]))\n mention_id_to_rank = {mention_id: rank for rank, (mention_id, _, _) in enumerate(sorted_mentions)}\n\n for curr_cluster in doc.clusters:\n sorted_cluster = sorted(curr_cluster, key=lambda m_id: (doc.mentions[m_id].tokens[0].position_in_document,\n doc.mentions[m_id].tokens[-1].position_in_document))\n\n for m1_id, m2_id in zip(sorted_cluster, sorted_cluster[1:]):\n # Distance 0 = mentions right next to eachother when ordered by position\n rank_diff = mention_id_to_rank[m2_id] - mention_id_to_rank[m1_id] - 1\n\n dist_between_mentions[rank_diff] = dist_between_mentions.get(rank_diff, 0) + 1\n\n print(\"\\nrazdalja_med_zaporednima_omenitvama_iste_entitete,frekvenca\")\n for curr_dist, num_mentions in sorted(dist_between_mentions.items(), key=lambda tup: tup[0]):\n print(f\"{curr_dist},{num_mentions}\")\n\n"
] |
[
[
"pandas.read_table"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
SNAPsoftware/ANLffr
|
[
"0dae88e0867ed75f8bad5e16e778a2aa27373e0d"
] |
[
"anlffr/preproc.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nModule for preprocessing utilities. These are supposed to complement\nthe preproessing utilities that are available with MNE.\n\n\"\"\"\nimport numpy as np\n\nfrom math import ceil\nfrom anlffr.utils import logger, verbose\nfrom mne import pick_channels\nfrom mne.filter import filter_data\n\n\n@verbose\ndef find_blinks(raw, event_id=998, thresh=100e-6, l_freq=0.5, h_freq=10,\n filter_length='auto', ch_name=['A1', ], tstart=0.,\n l_trans_bandwidth=0.15):\n\n \"\"\"Utility function to detect blink events from specified channel.\n\n Parameters\n ----------\n raw : instance of Raw\n The raw data.\n event_id : int\n The index to assign to found events.\n low_pass : float\n Low pass frequency.\n high_pass : float\n High pass frequency.\n filter_length : str | int | None\n Number of taps to use for filtering.\n ch_name: list | None\n If not None, use specified channel(s) for EOG\n tstart : float\n Start detection after tstart seconds.\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n Returns\n -------\n eog_events : array\n Events in MNE format, i.e., N x 3 array\n \"\"\"\n\n sampling_rate = raw.info['sfreq']\n first_samp = raw.first_samp\n\n ch_eog = pick_channels(raw.ch_names, include=ch_name)\n\n if len(ch_eog) == 0:\n raise ValueError('%s not in channel list' % ch_name)\n else:\n logger.info('Detecting blinks from channel %s' % ch_name)\n\n eog, _ = raw[ch_eog, :]\n filteog = filter_data(eog, sampling_rate, l_freq, h_freq,\n filter_length=filter_length,\n l_trans_bandwidth=l_trans_bandwidth)\n\n eog_events, blinkvals = peak_finder(filteog.squeeze(), thresh=thresh)\n eog_events_neg, blinkvals_neg = peak_finder(filteog.squeeze(),\n thresh=thresh, extrema=-1)\n\n # Discarding blinks that don't look like other blinks, electing polarity\n nominal_blink = np.median(np.abs(blinkvals))\n nominal_blink_neg = np.median(np.abs(blinkvals_neg))\n\n if nominal_blink_neg > nominal_blink:\n 
blinkvals = blinkvals_neg\n nominal_blink = nominal_blink_neg\n eog_events = eog_events_neg\n\n eog_events = eog_events[np.logical_and(np.abs(blinkvals) < 2*nominal_blink,\n np.abs(blinkvals) >\n 0.5*nominal_blink)]\n\n # Discarding blinks detected before tstart seconds\n eog_events = eog_events[eog_events > raw.time_as_index(tstart)]\n eog_events += first_samp\n n_events = len(eog_events)\n logger.info(\"Number of EOG events detected : %d\" % n_events)\n eog_events = np.c_[eog_events, np.zeros(n_events),\n event_id * np.ones(n_events)]\n\n return np.int64(eog_events)\n\n\n@verbose\ndef peak_finder(x0, thresh=None, extrema=1, verbose=None):\n \"\"\"Noise tolerant fast peak finding algorithm\n\n Parameters\n ----------\n x0 : 1d array\n A real vector from the maxima will be found (required).\n thresh : float\n The amount above surrounding data for a peak to be\n identified (default = (max(x0)-min(x0))/4). Larger values mean\n the algorithm is more selective in finding peaks.\n extrema : {-1, 1}\n 1 if maxima are desired, -1 if minima are desired\n (default = maxima, 1).\n verbose : bool, str, int, or None\n If not None, override default verbose level (see mne.verbose).\n\n Returns\n -------\n peak_loc : array\n The indices of the identified peaks in x0\n peak_mag : array\n The magnitude of the identified peaks\n\n Note\n ----\n If repeated values are found the first is identified as the peak.\n Conversion from initial Matlab code from:\n Nathanael C. 
Yoder ([email protected])\n\n Example\n -------\n t = 0:.0001:10;\n x = 12*sin(10*2*pi*t)-3*sin(.1*2*pi*t)+randn(1,numel(t));\n x(1250:1255) = max(x);\n peak_finder(x)\n \"\"\"\n\n x0 = np.asanyarray(x0)\n\n if x0.ndim >= 2:\n raise ValueError('The input data must be a 1D vector')\n\n s = x0.size\n\n if thresh is None:\n thresh = (np.max(x0) - np.min(x0)) / 4\n\n assert extrema in [-1, 1]\n\n if extrema == -1:\n x0 = extrema * x0 # Make it so we are finding maxima regardless\n\n dx0 = np.diff(x0) # Find derivative\n # This is so we find the first of repeated values\n dx0[dx0 == 0] = -np.finfo(float).eps\n # Find where the derivative changes sign\n ind = np.where(dx0[:-1:] * dx0[1::] < 0)[0] + 1\n\n # Include endpoints in potential peaks and valleys\n x = np.concatenate((x0[:1], x0[ind], x0[-1:]))\n ind = np.concatenate(([0], ind, [s - 1]))\n\n # x only has the peaks, valleys, and endpoints\n length = x.size\n min_mag = np.min(x)\n\n if length > 2: # Function with peaks and valleys\n\n # Set initial parameters for loop\n temp_mag = min_mag\n found_peak = False\n left_min = min_mag\n\n # Deal with first point a little differently since tacked it on\n # Calculate the sign of the derivative since we taked the first point\n # on it does not necessarily alternate like the rest.\n signDx = np.sign(np.diff(x[:3]))\n if signDx[0] <= 0: # The first point is larger or equal to the second\n ii = -1\n if signDx[0] == signDx[1]: # Want alternating signs\n x = np.concatenate((x[:1], x[2:]))\n ind = np.concatenate((ind[:1], ind[2:]))\n length -= 1\n\n else: # First point is smaller than the second\n ii = 0\n if signDx[0] == signDx[1]: # Want alternating signs\n x = x[1:]\n ind = ind[1:]\n length -= 1\n\n # Preallocate max number of maxima\n maxPeaks = int(ceil(length / 2.0))\n peak_loc = np.zeros(maxPeaks, dtype=np.int)\n peak_mag = np.zeros(maxPeaks)\n c_ind = 0\n # Loop through extrema which should be peaks and then valleys\n while ii < (length - 1):\n ii += 1 # This is a peak\n 
# Reset peak finding if we had a peak and the next peak is bigger\n # than the last or the left min was small enough to reset.\n if found_peak and ((x[ii] > peak_mag[-1]) or\n (left_min < peak_mag[-1] - thresh)):\n temp_mag = min_mag\n found_peak = False\n\n # Make sure we don't iterate past the length of our vector\n if ii == length - 1:\n break # We assign the last point differently out of the loop\n\n # Found new peak that was lager than temp mag and threshold larger\n # than the minimum to its left.\n if (x[ii] > temp_mag) and (x[ii] > left_min + thresh):\n temp_loc = ii\n temp_mag = x[ii]\n\n ii += 1 # Move onto the valley\n # Come down at least thresh from peak\n if not found_peak and (temp_mag > (thresh + x[ii])):\n found_peak = True # We have found a peak\n left_min = x[ii]\n peak_loc[c_ind] = temp_loc # Add peak to index\n peak_mag[c_ind] = temp_mag\n c_ind += 1\n elif x[ii] < left_min: # New left minima\n left_min = x[ii]\n\n # Check end point\n if (x[-1] > temp_mag) and (x[-1] > (left_min + thresh)):\n peak_loc[c_ind] = length - 1\n peak_mag[c_ind] = x[-1]\n c_ind += 1\n elif not found_peak and temp_mag > min_mag:\n # Check if we still need to add the last point\n peak_loc[c_ind] = temp_loc\n peak_mag[c_ind] = temp_mag\n c_ind += 1\n\n # Create output\n peak_inds = ind[peak_loc[:c_ind]]\n peak_mags = peak_mag[:c_ind]\n else: # This is a monotone function where an endpoint is the only peak\n x_ind = np.argmax(x)\n peak_mags = x[x_ind]\n if peak_mags > (min_mag + thresh):\n peak_inds = ind[x_ind]\n else:\n peak_mags = []\n peak_inds = []\n\n # Change sign of data if was finding minima\n if extrema < 0:\n peak_mags *= -1.0\n x0 = -x0\n\n # Plot if no output desired\n if len(peak_inds) == 0:\n logger.info('No significant peaks found')\n\n return peak_inds, peak_mags\n"
] |
[
[
"numpy.abs",
"numpy.min",
"numpy.finfo",
"numpy.concatenate",
"numpy.int64",
"numpy.max",
"numpy.asanyarray",
"numpy.diff",
"numpy.argmax",
"numpy.ones",
"numpy.zeros",
"numpy.where"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xiadeye/PaddleOCR
|
[
"ffecf10688f1af030db8f8dd3f70def9a2875595"
] |
[
"tools/infer_det.py"
] |
[
"# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nfrom copy import deepcopy\nimport json\n\nimport os\nimport sys\n__dir__ = os.path.dirname(__file__)\nsys.path.append(__dir__)\nsys.path.append(os.path.join(__dir__, '..'))\n\n\ndef set_paddle_flags(**kwargs):\n for key, value in kwargs.items():\n if os.environ.get(key, None) is None:\n os.environ[key] = str(value)\n\n\n# NOTE(paddle-dev): All of these flags should be\n# set before `import paddle`. 
Otherwise, it would\n# not take any effect.\nset_paddle_flags(\n FLAGS_eager_delete_tensor_gb=0, # enable GC to save memory\n)\n\nfrom paddle import fluid\nfrom ppocr.utils.utility import create_module, get_image_file_list\nimport program\nfrom ppocr.utils.save_load import init_model\nfrom ppocr.data.reader_main import reader_main\nimport cv2\n\nfrom ppocr.utils.utility import initial_logger\nlogger = initial_logger()\n\n\ndef draw_det_res(dt_boxes, config, img, img_name):\n if len(dt_boxes) > 0:\n import cv2\n src_im = img\n for box in dt_boxes:\n box = box.astype(np.int32).reshape((-1, 1, 2))\n cv2.polylines(src_im, [box], True, color=(255, 255, 0), thickness=2)\n save_det_path = os.path.dirname(config['Global'][\n 'save_res_path']) + \"/det_results/\"\n if not os.path.exists(save_det_path):\n os.makedirs(save_det_path)\n save_path = os.path.join(save_det_path, os.path.basename(img_name))\n cv2.imwrite(save_path, src_im)\n logger.info(\"The detected Image saved in {}\".format(save_path))\n\n\ndef main():\n config = program.load_config(FLAGS.config)\n program.merge_config(FLAGS.opt)\n print(config)\n\n # check if set use_gpu=True in paddlepaddle cpu version\n use_gpu = config['Global']['use_gpu']\n program.check_gpu(use_gpu)\n\n place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()\n exe = fluid.Executor(place)\n\n det_model = create_module(config['Architecture']['function'])(params=config)\n\n startup_prog = fluid.Program()\n eval_prog = fluid.Program()\n with fluid.program_guard(eval_prog, startup_prog):\n with fluid.unique_name.guard():\n _, eval_outputs = det_model(mode=\"test\")\n fetch_name_list = list(eval_outputs.keys())\n eval_fetch_list = [eval_outputs[v].name for v in fetch_name_list]\n\n eval_prog = eval_prog.clone(for_test=True)\n exe.run(startup_prog)\n\n # load checkpoints\n checkpoints = config['Global'].get('checkpoints')\n if checkpoints:\n path = checkpoints\n fluid.load(eval_prog, path, exe)\n logger.info(\"Finish initing model from 
{}\".format(path))\n else:\n raise Exception(\"{} not exists!\".format(checkpoints))\n\n save_res_path = config['Global']['save_res_path']\n if not os.path.exists(os.path.dirname(save_res_path)):\n os.makedirs(os.path.dirname(save_res_path))\n with open(save_res_path, \"wb\") as fout:\n\n test_reader = reader_main(config=config, mode='test')\n tackling_num = 0\n for data in test_reader():\n img_num = len(data)\n tackling_num = tackling_num + img_num\n logger.info(\"tackling_num:%d\", tackling_num)\n img_list = []\n ratio_list = []\n img_name_list = []\n for ino in range(img_num):\n img_list.append(data[ino][0])\n ratio_list.append(data[ino][1])\n img_name_list.append(data[ino][2])\n\n img_list = np.concatenate(img_list, axis=0)\n outs = exe.run(eval_prog,\\\n feed={'image': img_list},\\\n fetch_list=eval_fetch_list)\n\n global_params = config['Global']\n postprocess_params = deepcopy(config[\"PostProcess\"])\n postprocess_params.update(global_params)\n postprocess = create_module(postprocess_params['function'])\\\n (params=postprocess_params)\n if config['Global']['algorithm'] == 'EAST':\n dic = {'f_score': outs[0], 'f_geo': outs[1]}\n elif config['Global']['algorithm'] == 'DB':\n dic = {'maps': outs[0]}\n else:\n raise Exception(\"only support algorithm: ['EAST', 'DB']\")\n dt_boxes_list = postprocess(dic, ratio_list)\n for ino in range(img_num):\n dt_boxes = dt_boxes_list[ino]\n img_name = img_name_list[ino]\n dt_boxes_json = []\n for box in dt_boxes:\n tmp_json = {\"transcription\": \"\"}\n tmp_json['points'] = box.tolist()\n dt_boxes_json.append(tmp_json)\n otstr = img_name + \"\\t\" + json.dumps(dt_boxes_json) + \"\\n\"\n fout.write(otstr.encode())\n src_img = cv2.imread(img_name)\n draw_det_res(dt_boxes, config, src_img, img_name)\n\n logger.info(\"success!\")\n\n\nif __name__ == '__main__':\n parser = program.ArgsParser()\n FLAGS = parser.parse_args()\n main()\n"
] |
[
[
"numpy.concatenate"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Martinouj/ludwig
|
[
"71ca2189bcee7a2667c428aeb1bf738697cbe83d",
"71ca2189bcee7a2667c428aeb1bf738697cbe83d",
"71ca2189bcee7a2667c428aeb1bf738697cbe83d"
] |
[
"ludwig/utils/tf_utils.py",
"ludwig/features/set_feature.py",
"ludwig/models/modules/attention_modules.py"
] |
[
"#! /usr/bin/env python\n# coding=utf-8\n# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport tensorflow as tf\n\n\ndef sequence_length_3D(sequence):\n used = tf.sign(tf.reduce_max(tf.abs(sequence), 2))\n length = tf.reduce_sum(used, 1)\n length = tf.cast(length, tf.int32)\n return length\n\n\ndef sequence_length_2D(sequence):\n used = tf.sign(tf.abs(sequence))\n length = tf.reduce_sum(used, 1)\n length = tf.cast(length, tf.int32)\n return length\n\n\n# Convert a dense matrix into a sparse matrix (for e.g. 
edit_distance)\ndef to_sparse(tensor, lengths, max_length):\n mask = tf.sequence_mask(lengths, max_length)\n indices = tf.cast(tf.where(tf.equal(mask, True)), tf.int64)\n values = tf.cast(tf.boolean_mask(tensor, mask), tf.int32)\n shape = tf.cast(tf.shape(tensor), tf.int64)\n return tf.SparseTensor(indices, values, shape)\n\n\ndef get_tf_config(gpus=None, gpu_fraction=1, horovod=None,\n allow_parallel_threads=True):\n intra_op_parallelism_threads = 2 # defult in tensorflow\n inter_op_parallelism_threads = 5 # defult in tensorflow\n if not allow_parallel_threads:\n # this is needed for reproducibility\n intra_op_parallelism_threads = 1\n inter_op_parallelism_threads = 1\n\n if gpus is not None:\n if gpu_fraction > 0 and gpu_fraction < 1:\n # this is the source of freezing in tensorflow 1.3.1\n gpu_options = tf.GPUOptions(\n per_process_gpu_memory_fraction=gpu_fraction,\n allow_growth=True)\n else:\n gpu_options = tf.GPUOptions(allow_growth=True)\n # allow_growth=True is needed for a weird behavior with CUDA 10\n # https://github.com/tensorflow/tensorflow/issues/24828\n if isinstance(gpus, int):\n gpus = [gpus]\n gpu_options.visible_device_list = ','.join(str(g) for g in gpus)\n tf_config = tf.ConfigProto(allow_soft_placement=True,\n log_device_placement=False,\n intra_op_parallelism_threads=intra_op_parallelism_threads,\n inter_op_parallelism_threads=inter_op_parallelism_threads,\n gpu_options=gpu_options)\n else:\n tf_config = tf.ConfigProto(allow_soft_placement=True,\n log_device_placement=False,\n intra_op_parallelism_threads=intra_op_parallelism_threads,\n inter_op_parallelism_threads=inter_op_parallelism_threads)\n\n if horovod is not None:\n tf_config.gpu_options.visible_device_list = str(horovod.local_rank())\n\n return tf_config\n",
"#! /usr/bin/env python\n# coding=utf-8\n# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport logging\nimport os\nfrom collections import OrderedDict\n\nimport numpy as np\nimport tensorflow as tf\n\nfrom ludwig.constants import *\nfrom ludwig.features.base_feature import BaseFeature\nfrom ludwig.features.base_feature import InputFeature\nfrom ludwig.features.base_feature import OutputFeature\nfrom ludwig.features.feature_utils import set_str_to_idx\nfrom ludwig.models.modules.embedding_modules import EmbedSparse\nfrom ludwig.models.modules.initializer_modules import get_initializer\nfrom ludwig.utils.misc import set_default_value\nfrom ludwig.utils.strings_utils import create_vocabulary\n\nlogger = logging.getLogger(__name__)\n\n\nclass SetBaseFeature(BaseFeature):\n def __init__(self, feature):\n super().__init__(feature)\n self.type = IMAGE\n\n preprocessing_defaults = {\n 'tokenizer': 'space',\n 'most_common': 10000,\n 'lowercase': False,\n 'missing_value_strategy': FILL_WITH_CONST,\n 'fill_value': ''\n }\n\n @staticmethod\n def get_feature_meta(column, preprocessing_parameters):\n idx2str, str2idx, str2freq, max_size = create_vocabulary(\n column,\n preprocessing_parameters['tokenizer'],\n num_most_frequent=preprocessing_parameters['most_common'],\n lowercase=preprocessing_parameters['lowercase']\n )\n return {\n 'idx2str': 
idx2str,\n 'str2idx': str2idx,\n 'str2freq': str2freq,\n 'vocab_size': len(str2idx),\n 'max_set_size': max_size\n }\n\n @staticmethod\n def feature_data(column, metadata, preprocessing_parameters):\n feature_vector = np.array(\n column.map(\n lambda x: set_str_to_idx(\n x,\n metadata['str2idx'],\n preprocessing_parameters['tokenizer']\n )\n )\n )\n\n set_matrix = np.zeros(\n (len(column),\n len(metadata['str2idx'])),\n dtype=bool\n )\n\n for i in range(len(column)):\n set_matrix[i, feature_vector[i]] = 1\n\n return set_matrix\n\n @staticmethod\n def add_feature_data(\n feature,\n dataset_df,\n data,\n metadata,\n preprocessing_parameters,\n ):\n data[feature['name']] = SetBaseFeature.feature_data(\n dataset_df[feature['name']].astype(str),\n metadata[feature['name']],\n preprocessing_parameters\n )\n\n\nclass SetInputFeature(SetBaseFeature, InputFeature):\n def __init__(self, feature):\n super().__init__(feature)\n\n self.vocab = []\n self.embedding_size = 50\n self.representation = 'dense'\n self.embeddings_trainable = True\n self.pretrained_embeddings = None\n self.embeddings_on_cpu = False\n self.dropout = False\n self.initializer = None\n self.regularize = True\n\n _ = self.overwrite_defaults(feature)\n\n self.embed_sparse = EmbedSparse(\n self.vocab,\n self.embedding_size,\n representation=self.representation,\n embeddings_trainable=self.embeddings_trainable,\n pretrained_embeddings=self.pretrained_embeddings,\n embeddings_on_cpu=self.embeddings_on_cpu,\n dropout=self.dropout,\n initializer=self.initializer,\n regularize=self.regularize\n )\n\n def _get_input_placeholder(self):\n # None is for dealing with variable batch size\n return tf.placeholder(\n tf.int32,\n shape=[None, len(self.vocab)],\n name=self.name\n )\n\n def build_input(\n self,\n regularizer,\n dropout_rate,\n is_training=False,\n **kwargs\n ):\n placeholder = self._get_input_placeholder()\n logger.debug(' placeholder: {0}'.format(placeholder))\n\n embedded, embedding_size = 
self.embed_sparse(\n placeholder,\n regularizer,\n dropout_rate,\n is_training=is_training\n )\n logger.debug(' feature_representation: {0}'.format(embedded))\n\n feature_representation = {\n 'name': self.name,\n 'type': self.type,\n 'representation': embedded,\n 'size': embedding_size,\n 'placeholder': placeholder\n }\n\n return feature_representation\n\n @staticmethod\n def update_model_definition_with_metadata(\n input_feature,\n feature_metadata,\n *args,\n **kwargs\n ):\n input_feature['vocab'] = feature_metadata['idx2str']\n\n @staticmethod\n def populate_defaults(input_feature):\n set_default_value(input_feature, 'tied_weights', None)\n\n\nclass SetOutputFeature(SetBaseFeature, OutputFeature):\n def __init__(self, feature):\n super().__init__(feature)\n self.type = SET\n\n self.loss = {'type': 'sigmoid_cross_entropy'}\n self.num_classes = 0\n self.threshold = 0.5\n self.initializer = None\n self.regularize = True\n\n _ = self.overwrite_defaults(feature)\n\n def _get_output_placeholder(self):\n return tf.placeholder(\n tf.bool,\n shape=[None, self.num_classes],\n name='{}_placeholder'.format(self.name)\n )\n\n def _get_predictions(\n self,\n hidden,\n hidden_size,\n regularizer=None\n ):\n if not self.regularize:\n regularizer = None\n\n with tf.variable_scope('predictions_{}'.format(self.name)):\n initializer_obj = get_initializer(self.initializer)\n weights = tf.get_variable(\n 'weights',\n initializer=initializer_obj([hidden_size, self.num_classes]),\n regularizer=regularizer\n )\n logger.debug(' class_weights: {0}'.format(weights))\n\n biases = tf.get_variable(\n 'biases',\n [self.num_classes]\n )\n logger.debug(' class_biases: {0}'.format(biases))\n\n logits = tf.matmul(hidden, weights) + biases\n logger.debug(' logits: {0}'.format(logits))\n\n probabilities = tf.nn.sigmoid(\n logits,\n name='probabilities_{}'.format(self.name)\n )\n\n predictions = tf.greater_equal(\n probabilities,\n self.threshold,\n name='predictions_{}'.format(self.name)\n )\n\n 
return predictions, probabilities, logits\n\n def _get_loss(\n self,\n targets,\n logits\n ):\n with tf.variable_scope('loss_{}'.format(self.name)):\n train_loss = tf.nn.sigmoid_cross_entropy_with_logits(\n labels=tf.cast(targets, tf.float32),\n logits=logits\n )\n train_loss = tf.reduce_sum(train_loss, axis=1)\n\n train_mean_loss = tf.reduce_mean(\n train_loss,\n name='train_mean_loss_{}'.format(self.name)\n )\n\n return train_mean_loss, train_loss\n\n def _get_measures(self, targets, predictions):\n intersection = tf.reduce_sum(\n tf.cast(tf.logical_and(targets, predictions), tf.float32),\n axis=1\n )\n union = tf.reduce_sum(\n tf.cast(tf.logical_or(targets, predictions), tf.float32),\n axis=1\n )\n jaccard_index = intersection / union\n\n return jaccard_index\n\n def build_output(\n self,\n hidden,\n hidden_size,\n regularizer=None,\n dropout_rate=None,\n is_training=None,\n **kwargs\n ):\n output_tensors = {}\n\n # ================ Placeholder ================\n targets = self._get_output_placeholder()\n output_tensors[self.name] = targets\n logger.debug(' targets_placeholder: {0}'.format(targets))\n\n # ================ Predictions ================\n ppl = self._get_predictions(\n hidden,\n hidden_size,\n regularizer=regularizer\n )\n predictions, probabilities, logits = ppl\n\n jaccard_index = self._get_measures(targets, predictions)\n\n output_tensors[PREDICTIONS + '_' + self.name] = predictions\n output_tensors[PROBABILITIES + '_' + self.name] = probabilities\n output_tensors[JACCARD + '_' + self.name] = jaccard_index\n\n # ================ Loss (Binary Cross Entropy) ================\n train_mean_loss, eval_loss = self._get_loss(targets, logits)\n\n output_tensors[EVAL_LOSS + '_' + self.name] = eval_loss\n output_tensors[TRAIN_MEAN_LOSS + '_' + self.name] = train_mean_loss\n\n tf.summary.scalar(\n 'train_mean_loss_{}'.format(self.name),\n train_mean_loss\n )\n\n return train_mean_loss, eval_loss, output_tensors\n\n default_validation_measure = JACCARD\n\n 
output_config = OrderedDict([\n (LOSS, {\n 'output': EVAL_LOSS,\n 'aggregation': SUM,\n 'value': 0,\n 'type': MEASURE\n }),\n (JACCARD, {\n 'output': JACCARD,\n 'aggregation': SUM,\n 'value': 0,\n 'type': MEASURE\n }),\n (PREDICTIONS, {\n 'output': PREDICTIONS,\n 'aggregation': APPEND,\n 'value': [],\n 'type': PREDICTION\n }),\n (PROBABILITIES, {\n 'output': PROBABILITIES,\n 'aggregation': APPEND,\n 'value': [],\n 'type': PREDICTION\n })\n ])\n\n @staticmethod\n def update_model_definition_with_metadata(\n output_feature,\n feature_metadata,\n *args,\n **kwargs\n ):\n output_feature[LOSS]['type'] = None\n output_feature['num_classes'] = feature_metadata['vocab_size']\n\n @staticmethod\n def calculate_overall_stats(\n test_stats,\n output_feature,\n dataset,\n train_set_metadata\n ):\n pass\n\n @staticmethod\n def postprocess_results(\n output_feature,\n result,\n metadata,\n experiment_dir_name,\n skip_save_unprocessed_output=False\n ):\n postprocessed = {}\n npy_filename = os.path.join(experiment_dir_name, '{}_{}.npy')\n name = output_feature['name']\n\n if PREDICTIONS in result and len(result[PREDICTIONS]) > 0:\n preds = result[PREDICTIONS]\n if 'idx2str' in metadata:\n postprocessed[PREDICTIONS] = [\n [metadata['idx2str'][i] for i, pred in enumerate(pred_set)\n if pred == True] for pred_set in preds\n ]\n else:\n postprocessed[PREDICTIONS] = preds\n\n if not skip_save_unprocessed_output:\n np.save(npy_filename.format(name, PREDICTIONS), preds)\n\n del result[PREDICTIONS]\n\n if PROBABILITIES in result and len(result[PROBABILITIES]) > 0:\n probs = result[PROBABILITIES]\n prob = [[prob for prob in prob_set if\n prob >= output_feature['threshold']] for prob_set in probs]\n postprocessed[PROBABILITIES] = probs\n postprocessed['probability'] = prob\n\n if not skip_save_unprocessed_output:\n np.save(npy_filename.format(name, PROBABILITIES), probs)\n np.save(npy_filename.format(name, 'probability'), probs)\n\n del result[PROBABILITIES]\n\n return postprocessed\n\n 
@staticmethod\n def populate_defaults(output_feature):\n set_default_value(output_feature, LOSS, {'weight': 1, 'type': None})\n set_default_value(output_feature[LOSS], 'weight', 1)\n\n set_default_value(output_feature, 'threshold', 0.5)\n set_default_value(output_feature, 'dependencies', [])\n set_default_value(output_feature, 'reduce_input', SUM)\n set_default_value(output_feature, 'reduce_dependencies', SUM)\n",
"# coding=utf-8\n# Copyright (c) 2019 Uber Technologies, Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nimport logging\n\nimport tensorflow as tf\n\n\nlogger = logging.getLogger(__name__)\n\n\ndef reduce_feed_forward_attention(current_inputs, hidden_size=256):\n with tf.variable_scope('reduce_ff_attention'):\n weights_1 = tf.get_variable('weights_1',\n [current_inputs.shape[-1], hidden_size])\n logger.debug(' att_weights_1: {}'.format(weights_1))\n biases_1 = tf.get_variable('biases_1', [hidden_size])\n logger.debug(' att_biases_1: {}'.format(biases_1))\n weights_2 = tf.get_variable('weights_2', [hidden_size, 1])\n logger.debug(' att_weights_2: {}'.format(weights_2))\n\n current_inputs_reshape = tf.reshape(current_inputs,\n [-1, current_inputs.shape[-1]])\n hidden = tf.tanh(\n tf.matmul(current_inputs_reshape, weights_1) + biases_1)\n logger.debug(' att_hidden: {}'.format(hidden))\n attention = tf.nn.softmax(tf.reshape(tf.matmul(hidden, weights_2),\n [-1, tf.shape(current_inputs)[1]]))\n logger.debug(' att_attention: {}'.format(attention))\n # attention [bs x seq]\n geated_inputs = tf.reduce_sum(\n tf.expand_dims(attention, -1) * current_inputs, 1)\n logger.debug(' att_geated_inputs: {}'.format(geated_inputs))\n return geated_inputs\n\n\ndef feed_forward_attention(current_inputs, feature_hidden_size,\n hidden_size=256):\n with tf.variable_scope('ff_attention'):\n geated_inputs 
= reduce_feed_forward_attention(current_inputs,\n hidden_size=hidden_size)\n\n # stacking inputs and attention vectors\n tiled_geated_inputs = tf.tile(tf.expand_dims(geated_inputs, 1),\n [1, tf.shape(current_inputs)[1], 1])\n logger.debug(\n ' att_tiled_geated_inputs: {}'.format(tiled_geated_inputs))\n outputs = tf.concat([current_inputs, tiled_geated_inputs],\n axis=-1) # [bs x s1 x 2*h]\n logger.debug(' att_outputs: {}'.format(outputs))\n # outputs = current_inputs + context # [bs x s1 x h]\n\n return outputs, feature_hidden_size * 2\n\n\ndef simple_memory_attention(current_inputs, context):\n assert current_inputs.shape[2] == context.shape[2]\n # calculating attention\n attention = tf.nn.softmax(\n tf.matmul(current_inputs, context, transpose_b=True)) # [bs x s1 x s2]\n logger.debug(' att_outputs: {}'.format(attention))\n\n # weighted_sum(attention, encoding_sequence_embedded)\n exp_ese = tf.expand_dims(context, 1) # [bs x 1 x s2 x h]\n exp_att = tf.expand_dims(attention, -1) # [bs x s1 x s2 x 1]\n weighted_sum = tf.multiply(exp_ese, exp_att) # [bs x s1 x s2 x h]\n reduced_weighted_sum = tf.reduce_sum(weighted_sum, axis=2) # [bs x s1 x h]\n logger.debug(' att_reduced_weighted_sum: {}'.format(reduced_weighted_sum))\n\n # stacking inputs and attention vectors\n outputs = tf.concat([current_inputs, reduced_weighted_sum],\n axis=-1) # [bs x s1 x 2*h]\n logger.debug(' att_outputs: {}'.format(outputs))\n\n return outputs, outputs.shape[-1]\n\n\ndef feed_forward_memory_attention(current_inputs, memory, hidden_size=256):\n seq_len = tf.shape(current_inputs)[1]\n mem_len = tf.shape(current_inputs)[1]\n seq_width = current_inputs.shape[2]\n mem_width = memory.shape[2]\n\n inputs_tile = tf.reshape(tf.tile(current_inputs, [1, 1, mem_len]),\n [-1, seq_len, mem_len, seq_width])\n context_tile = tf.reshape(tf.tile(memory, [1, seq_len, 1]),\n [-1, seq_len, mem_len, mem_width])\n concat_tile = tf.concat([inputs_tile, context_tile],\n axis=-1) # [bs, seq, seq, seq_w + ctx_w]\n 
logger.debug(' att_input_context_concat: {}'.format(concat_tile))\n\n with tf.variable_scope('reduce_contextual_ff_attention'):\n weights_1 = tf.get_variable('weights_1',\n [concat_tile.shape[-1], hidden_size])\n logger.debug(' att_weights_1: {}'.format(weights_1))\n biases_1 = tf.get_variable('biases_1', [hidden_size])\n logger.debug(' att_biases_1: {}'.format(biases_1))\n weights_2 = tf.get_variable('weights_2', [hidden_size, 1])\n logger.debug(' att_weights_2: {}'.format(weights_2))\n\n current_inputs_reshape = tf.reshape(concat_tile,\n [-1, concat_tile.shape[-1]])\n hidden = tf.tanh(\n tf.matmul(current_inputs_reshape, weights_1) + biases_1)\n logger.debug(' att_hidden: {}'.format(hidden))\n attention = tf.nn.softmax(\n tf.reshape(tf.matmul(hidden, weights_2), [-1, seq_len, mem_len]))\n logger.debug(' att_attention: {}'.format(attention))\n # attention [bs x seq]\n geated_inputs = tf.reduce_sum(\n tf.expand_dims(attention, -1) * inputs_tile, 2)\n logger.debug(' att_geated_inputs: {}'.format(geated_inputs))\n\n return geated_inputs, geated_inputs.shape.as_list()[-1]\n"
] |
[
[
"tensorflow.boolean_mask",
"tensorflow.shape",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.equal",
"tensorflow.SparseTensor",
"tensorflow.ConfigProto",
"tensorflow.GPUOptions",
"tensorflow.sequence_mask",
"tensorflow.abs"
],
[
"tensorflow.get_variable",
"tensorflow.matmul",
"tensorflow.logical_or",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.logical_and"
],
[
"tensorflow.get_variable",
"tensorflow.multiply",
"tensorflow.concat",
"tensorflow.matmul",
"tensorflow.shape",
"tensorflow.reduce_sum",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.variable_scope",
"tensorflow.tile"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.5",
"1.7",
"1.0",
"1.2"
]
}
] |
gumpy-hybridBCI/gumpy-Realtime
|
[
"163f72ff4d8734cbfd13848e21ce7d4cafc6e8e9",
"163f72ff4d8734cbfd13848e21ce7d4cafc6e8e9"
] |
[
"src/live-generation-spectrograms/live_processing.py",
"src/Live-EEG_decoding/TEST_live_EEG-Split.py"
] |
[
"\"\"\"Utility scripts to provide a filterbank and spectrogram generator.\n\n\nThe filterbank utilizes filter functions defined in gumpy.signal.\n\nTo ensure a consistent spectrogram size, either cropping or scaling can be\nused. Just change the corresponding lines in the specGen.process function.\n\n\"\"\"\n\nimport math\n\nimport numpy as np\n#from scipy.misc import imresize # for spectrogram scaling, requires Pillow\nfrom scipy.signal import spectrogram\n\nimport gumpy.signal as gs\n\nclass filterbank:\n\n\tdef __init__(self, lowcut=2, highcut=60, order=3, fs=256):\n\t\tself.bandPass = gs.butter_bandpass(lowcut,highcut,order,fs)\n\t\tself.notch = gs.butter_bandstop()\n\t\t#notch = gs.notch()\n\n\tdef process(self, data):\n\t\treturn self.notch.process(self.bandPass.process(data))\n\nclass specGen:\n\n def __init__(self, width = 32, height = 32, numChannels = 3, lowf = 2, periods = 1.5, overlapRatio = 0.95, fs=256):\n self.width = width\n self.height = height\n self.nChannels = numChannels\n self.fs = fs\n self.lowf = lowf # lowcut\n self.SFFTwindowWidth = int(math.ceil(fs/lowf * periods))\n self.SFFToverlap = int(math.floor(self.SFFTwindowWidth * overlapRatio))\n self.smpPerSpec = int(self.SFFTwindowWidth + (self.width - 1) * (self.SFFTwindowWidth - self.SFFToverlap))\n\n def process(self, data):\n # raw spectrogram generation\n specsRaw = []\n for iChannel in xrange(self.nChannels):\n specsRaw.append(spectrogram(data[:, iChannel], self.fs, nperseg=self.SFFTwindowWidth, noverlap=self.SFFToverlap, detrend=False)[2]) \n\n # reshaping\n specs = np.zeros((self.nChannels, self.height, self.width))\n if specsRaw[1].shape[1]>self.width:\n start = spec_1.shape[1] - self.width\n else:\n start = 0\n\n for iChannel in xrange(self.nChannels):\n # cropped\n specs[iChannel, :, :] = specsRaw[iChannel][self.lowf:self.height+self.lowf, start:].copy()\n # scaled (requires imresize)\n #specs[iChannel, :, :] = imresize(arr=specsRaw[iChannel][self.lowf:, :], size=(self.height, 
self.width), interp='nearest', mode='F')\n\n return specs\n ",
"# Mirjam Hemberger \r\n# 14.03.2018\r\n# this script is used to test the functions in EEG-motor-imaginery-NST_live.py and nst_eeg_live without having to call them from record_data or run_session\r\n# Training of the model and testing with a certain ration, e.g. 80% data for training, 20% for testing\r\n\r\n\r\n#import matplotlib.pyplot as plt \r\n\r\nimport sys, os, os.path\r\nsys.path.append('C:\\\\Users\\\\mirja\\\\Anaconda3\\\\Lib\\\\site-packages\\\\gumpy-master\\\\gumpy')\r\n\r\nimport numpy as np\r\nimport gumpy\r\n\r\nfrom eeg_motor_imagery_NST_live_split import liveEEG_split\r\nfrom gumpy.data.nst_eeg_live import NST_EEG_LIVE\r\n\r\nif __name__ == '__main__':\r\n save_stdout = sys.stdout\r\n fh = open('Results_LiveClassfication.txt', 'w')\r\n sys.stdout = fh\r\n subjects = {'s1','s4', 's20'}\r\n print('\\nTraining-testing with 50-50 percent')\r\n print('Classifier: Quadratic LDA\\n')\r\n \r\n for subject in subjects:\r\n print('\\n\\n\\nData identification:', subject, '\\n')\r\n data_base_dir = 'C:\\\\Users\\\\mirja\\\\Documents\\\\TUM\\\\IP\\\\NST'\r\n base_dir = os.path.join(data_base_dir, subject)\r\n \r\n #isn't used for training and testing with data split\r\n file_name = 'Run1.mat'\r\n file_name2 = 'Run3.mat'\r\n\r\n myclass = liveEEG_split(base_dir,file_name)\r\n\r\n myclass.fit()\r\n\r\n count_pred_true = 0\r\n count_pred_false = 0\r\n\r\n current_classifier, pred_true, pred_valid = myclass.classify_live()\r\n\r\n #print('Classification Result:', current_classifier)\r\n #print('pred_true words:', pred_true)\r\n\r\n pred_true = np.int_(pred_true)\r\n print('pred_true numbers:', pred_true)\r\n print('true numbers:', pred_valid)\r\n count_pred_true = sum(pred_true)\r\n count_pred_false = len(pred_true) - count_pred_true\r\n\r\n print('Count of true predictions:', count_pred_true)\r\n print('Count of false predictions:', count_pred_false)\r\n print('Percentage of true predictions:', 100*count_pred_true/(count_pred_false+count_pred_true), 
'\\n\\n')\r\n\r\n sys.stdout = save_stdout\r\n fh.close()\r\n"
] |
[
[
"numpy.zeros",
"scipy.signal.spectrogram"
],
[
"numpy.int_"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"1.6",
"1.10",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
arghyaganguly/model-analysis
|
[
"1617375dd35e72447653e54330484c3a2950e4c6"
] |
[
"tensorflow_model_analysis/size_estimator_test.py"
] |
[
"# Lint as: python3\n# Copyright 2020 Google LLC\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Tests for size estimator.\"\"\"\n\nimport sys\n\nimport tensorflow as tf\nfrom tensorflow_model_analysis import size_estimator\n\n\nclass SizeEstimatorTest(tf.test.TestCase):\n\n def testRefCountAmortization(self):\n estimator = size_estimator.SizeEstimator(size_threshold=10, size_fn=len)\n self.assertEqual(estimator.get_estimate(), 0)\n a = b'plmjh'\n b, c = a, a\n expected_size_estimate = (len(a) / (sys.getrefcount(a) - 1)) * 4\n estimator.update(a)\n estimator.update(b)\n estimator.update(c)\n estimator.update(a)\n self.assertEqual(estimator.get_estimate(), expected_size_estimate)\n self.assertFalse(estimator.should_flush())\n\n def testFlush(self):\n estimator = size_estimator.SizeEstimator(size_threshold=10, size_fn=len)\n self.assertEqual(estimator.get_estimate(), 0)\n estimator.update(b'plmjh')\n estimator.update(b'plmjhghytfghsggssss')\n self.assertTrue(estimator.should_flush())\n estimator.clear()\n self.assertEqual(estimator.get_estimate(), 0)\n\n def testMergeEstimators(self):\n estimator1 = size_estimator.SizeEstimator(size_threshold=10, size_fn=len)\n self.assertEqual(estimator1.get_estimate(), 0)\n estimator2 = size_estimator.SizeEstimator(size_threshold=10, size_fn=len)\n self.assertEqual(estimator2.get_estimate(), 0)\n a = b'plmjh'\n b, c = a, a\n expected_size_estimate = (len(a) / (sys.getrefcount(a) - 1)) * 4\n estimator1.update(a)\n 
estimator1.update(b)\n estimator2.update(c)\n estimator2.update(a)\n estimator1 += estimator2\n self.assertEqual(estimator1.get_estimate(), expected_size_estimate)\n\n\nif __name__ == '__main__':\n tf.test.main()\n"
] |
[
[
"tensorflow.test.main"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
will-moore/napari
|
[
"56d18b4563654417067d187f8d796c7b3898c56f",
"56d18b4563654417067d187f8d796c7b3898c56f"
] |
[
"napari/layers/labels/_tests/test_labels.py",
"napari/_vispy/_tests/test_vispy_multiscale.py"
] |
[
"import numpy as np\nimport pytest\n\nfrom napari._tests.utils import check_layer_world_data_extent\nfrom napari.layers import Labels\nfrom napari.layers.labels._labels_constants import LabelColorMode\nfrom napari.utils import Colormap\n\n\ndef test_random_labels():\n \"\"\"Test instantiating Labels layer with random 2D data.\"\"\"\n shape = (10, 15)\n np.random.seed(0)\n data = np.random.randint(20, size=shape)\n layer = Labels(data)\n assert np.all(layer.data == data)\n assert layer.ndim == len(shape)\n assert layer.shape == shape\n assert layer._data_view.shape == shape[-2:]\n assert layer.editable is True\n\n\ndef test_all_zeros_labels():\n \"\"\"Test instantiating Labels layer with all zeros data.\"\"\"\n shape = (10, 15)\n data = np.zeros(shape, dtype=int)\n layer = Labels(data)\n assert np.all(layer.data == data)\n assert layer.ndim == len(shape)\n assert layer.shape == shape\n assert layer._data_view.shape == shape[-2:]\n\n\ndef test_3D_labels():\n \"\"\"Test instantiating Labels layer with random 3D data.\"\"\"\n shape = (6, 10, 15)\n np.random.seed(0)\n data = np.random.randint(20, size=shape)\n layer = Labels(data)\n assert np.all(layer.data == data)\n assert layer.ndim == len(shape)\n assert layer.shape == shape\n assert layer._data_view.shape == shape[-2:]\n assert layer.editable is True\n\n layer._slice_dims(ndisplay=3)\n assert layer._dims.ndisplay == 3\n assert layer.editable is False\n assert layer.mode == 'pan_zoom'\n\n\ndef test_changing_labels():\n \"\"\"Test changing Labels data.\"\"\"\n shape_a = (10, 15)\n shape_b = (20, 12)\n np.random.seed(0)\n data_a = np.random.randint(20, size=shape_a)\n data_b = np.random.randint(20, size=shape_b)\n layer = Labels(data_a)\n layer.data = data_b\n assert np.all(layer.data == data_b)\n assert layer.ndim == len(shape_b)\n assert layer.shape == shape_b\n assert layer._data_view.shape == shape_b[-2:]\n\n\ndef test_changing_labels_dims():\n \"\"\"Test changing Labels data including dimensionality.\"\"\"\n 
shape_a = (10, 15)\n shape_b = (20, 12, 6)\n np.random.seed(0)\n data_a = np.random.randint(20, size=shape_a)\n data_b = np.random.randint(20, size=shape_b)\n layer = Labels(data_a)\n\n layer.data = data_b\n assert np.all(layer.data == data_b)\n assert layer.ndim == len(shape_b)\n assert layer.shape == shape_b\n assert layer._data_view.shape == shape_b[-2:]\n\n\ndef test_changing_modes():\n \"\"\"Test changing modes.\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n layer = Labels(data)\n assert layer.mode == 'pan_zoom'\n assert layer.interactive is True\n\n layer.mode = 'fill'\n assert layer.mode == 'fill'\n assert layer.interactive is False\n\n layer.mode = 'paint'\n assert layer.mode == 'paint'\n assert layer.interactive is False\n\n layer.mode = 'pick'\n assert layer.mode == 'pick'\n assert layer.interactive is False\n\n layer.mode = 'pan_zoom'\n assert layer.mode == 'pan_zoom'\n assert layer.interactive is True\n\n layer.mode = 'paint'\n assert layer.mode == 'paint'\n layer.editable = False\n assert layer.mode == 'pan_zoom'\n assert layer.editable is False\n\n\ndef test_name():\n \"\"\"Test setting layer name.\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n layer = Labels(data)\n assert layer.name == 'Labels'\n\n layer = Labels(data, name='random')\n assert layer.name == 'random'\n\n layer.name = 'lbls'\n assert layer.name == 'lbls'\n\n\ndef test_visiblity():\n \"\"\"Test setting layer visibility.\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n layer = Labels(data)\n assert layer.visible is True\n\n layer.visible = False\n assert layer.visible is False\n\n layer = Labels(data, visible=False)\n assert layer.visible is False\n\n layer.visible = True\n assert layer.visible is True\n\n\ndef test_opacity():\n \"\"\"Test setting layer opacity.\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n layer = Labels(data)\n assert layer.opacity == 0.7\n\n layer.opacity = 0.5\n 
assert layer.opacity == 0.5\n\n layer = Labels(data, opacity=0.6)\n assert layer.opacity == 0.6\n\n layer.opacity = 0.3\n assert layer.opacity == 0.3\n\n\ndef test_blending():\n \"\"\"Test setting layer blending.\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n layer = Labels(data)\n assert layer.blending == 'translucent'\n\n layer.blending = 'additive'\n assert layer.blending == 'additive'\n\n layer = Labels(data, blending='additive')\n assert layer.blending == 'additive'\n\n layer.blending = 'opaque'\n assert layer.blending == 'opaque'\n\n\ndef test_seed():\n \"\"\"Test setting seed.\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n layer = Labels(data)\n assert layer.seed == 0.5\n\n layer.seed = 0.9\n assert layer.seed == 0.9\n\n layer = Labels(data, seed=0.7)\n assert layer.seed == 0.7\n\n\ndef test_num_colors():\n \"\"\"Test setting number of colors in colormap.\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n layer = Labels(data)\n assert layer.num_colors == 50\n\n layer.num_colors = 80\n assert layer.num_colors == 80\n\n layer = Labels(data, num_colors=60)\n assert layer.num_colors == 60\n\n\ndef test_properties():\n \"\"\"Test adding labels with properties.\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n\n layer = Labels(data)\n assert isinstance(layer.properties, dict)\n assert len(layer.properties) == 0\n\n properties = {'class': ['Background'] + [f'Class {i}' for i in range(20)]}\n label_index = {i: i for i in range(len(properties['class']))}\n layer = Labels(data, properties=properties)\n assert isinstance(layer.properties, dict)\n assert layer.properties == properties\n assert layer._label_index == label_index\n\n current_label = layer.get_value()\n layer_message = layer.get_message()\n assert layer_message.endswith(f'Class {current_label - 1}')\n\n properties = {'class': ['Background']}\n layer = Labels(data, properties=properties)\n layer_message = 
layer.get_message()\n assert layer_message.endswith(\"[No Properties]\")\n\n properties = {'class': ['Background', 'Class 12'], 'index': [0, 12]}\n label_index = {0: 0, 12: 1}\n layer = Labels(data, properties=properties)\n layer_message = layer.get_message()\n assert layer._label_index == label_index\n assert layer_message.endswith('Class 12')\n\n\ndef test_multiscale_properties():\n \"\"\"Test adding labels with multiscale properties.\"\"\"\n np.random.seed(0)\n data0 = np.random.randint(20, size=(10, 15))\n data1 = data0[::2, ::2]\n data = [data0, data1]\n\n layer = Labels(data)\n assert isinstance(layer.properties, dict)\n assert len(layer.properties) == 0\n\n properties = {'class': ['Background'] + [f'Class {i}' for i in range(20)]}\n label_index = {i: i for i in range(len(properties['class']))}\n layer = Labels(data, properties=properties)\n assert isinstance(layer.properties, dict)\n assert layer.properties == properties\n assert layer._label_index == label_index\n\n current_label = layer.get_value()[1]\n layer_message = layer.get_message()\n assert layer_message.endswith(f'Class {current_label - 1}')\n\n properties = {'class': ['Background']}\n layer = Labels(data, properties=properties)\n layer_message = layer.get_message()\n assert layer_message.endswith(\"[No Properties]\")\n\n properties = {'class': ['Background', 'Class 12'], 'index': [0, 12]}\n label_index = {0: 0, 12: 1}\n layer = Labels(data, properties=properties)\n layer_message = layer.get_message()\n assert layer._label_index == label_index\n assert layer_message.endswith('Class 12')\n\n\ndef test_colormap():\n \"\"\"Test colormap.\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n layer = Labels(data)\n assert isinstance(layer.colormap, Colormap)\n assert layer.colormap.name == 'label_colormap'\n\n layer.new_colormap()\n assert isinstance(layer.colormap, Colormap)\n assert layer.colormap.name == 'label_colormap'\n\n\ndef test_custom_color_dict():\n \"\"\"Test custom color 
dict.\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n layer = Labels(\n data, color={2: 'white', 4: 'red', 8: 'blue', 16: 'red', 32: 'blue'}\n )\n\n # test with custom color dict\n assert type(layer.get_color(2)) == np.ndarray\n assert type(layer.get_color(1)) == np.ndarray\n assert (layer.get_color(2) == np.array([1.0, 1.0, 1.0, 1.0])).all()\n assert (layer.get_color(4) == layer.get_color(16)).all()\n assert (layer.get_color(8) == layer.get_color(32)).all()\n\n # test disable custom color dict\n # should not initialize as white since we are using random.seed\n layer.color_mode = 'auto'\n assert not (layer.get_color(1) == np.array([1.0, 1.0, 1.0, 1.0])).all()\n\n\ndef test_metadata():\n \"\"\"Test setting labels metadata.\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n layer = Labels(data)\n assert layer.metadata == {}\n\n layer = Labels(data, metadata={'unit': 'cm'})\n assert layer.metadata == {'unit': 'cm'}\n\n\ndef test_brush_size():\n \"\"\"Test changing brush size.\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n layer = Labels(data)\n assert layer.brush_size == 10\n\n layer.brush_size = 20\n assert layer.brush_size == 20\n\n\ndef test_contiguous():\n \"\"\"Test changing contiguous.\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n layer = Labels(data)\n assert layer.contiguous is True\n\n layer.contiguous = False\n assert layer.contiguous is False\n\n\ndef test_n_dimensional():\n \"\"\"Test changing n_dimensional.\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n layer = Labels(data)\n assert layer.n_dimensional is False\n\n layer.n_dimensional = True\n assert layer.n_dimensional is True\n\n\ndef test_selecting_label():\n \"\"\"Test selecting label.\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n layer = Labels(data)\n assert layer.selected_label == 1\n assert (layer._selected_color == 
layer.get_color(1)).all\n\n layer.selected_label = 1\n assert layer.selected_label == 1\n assert len(layer._selected_color) == 4\n\n\ndef test_label_color():\n \"\"\"Test getting label color.\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n layer = Labels(data)\n col = layer.get_color(0)\n assert col is None\n\n col = layer.get_color(1)\n assert len(col) == 4\n\n\ndef test_selected_mode_label_color():\n \"\"\"Test color of labels in selected color mode\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n layer = Labels(data)\n original_color = layer.get_color(1)\n\n layer.color_mode = LabelColorMode.SELECTED\n original_background_color = layer.get_color(layer._background_label)\n none_color = layer.get_color(None)\n layer.selected_label = 1\n\n # color of selected label has not changed\n assert np.allclose(layer.get_color(layer.selected_label), original_color)\n\n current_background_color = layer.get_color(layer._background_label)\n # color of background is background color\n assert current_background_color == original_background_color\n\n # color of all others is none color\n other_labels = np.unique(layer.data)[2:]\n other_colors = np.array(\n list(map(lambda x: layer.get_color(x), other_labels))\n )\n assert np.allclose(other_colors, none_color)\n\n\ndef test_paint():\n \"\"\"Test painting labels with different square brush sizes.\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n data[:10, :10] = 1\n layer = Labels(data)\n layer.brush_shape = 'square'\n assert np.unique(layer.data[:5, :5]) == 1\n assert np.unique(layer.data[5:10, 5:10]) == 1\n\n layer.brush_size = 9\n layer.paint([0, 0], 2)\n assert np.unique(layer.data[:5, :5]) == 2\n assert np.unique(layer.data[5:10, 5:10]) == 1\n\n layer.brush_size = 10\n layer.paint([0, 0], 2)\n assert np.unique(layer.data[:6, :6]) == 2\n assert np.unique(layer.data[6:10, 6:10]) == 1\n\n layer.brush_size = 19\n layer.paint([0, 0], 2)\n assert 
np.unique(layer.data[:5, :5]) == 2\n assert np.unique(layer.data[5:10, 5:10]) == 2\n\n\ndef test_paint_with_preserve_labels():\n \"\"\"Test painting labels with square brush while preserving existing labels.\"\"\"\n data = np.zeros((15, 10))\n data[:3, :3] = 1\n layer = Labels(data)\n layer.brush_shape = 'square'\n layer.preserve_labels = True\n assert np.unique(layer.data[:3, :3]) == 1\n\n layer.brush_size = 9\n layer.paint([0, 0], 2)\n\n assert np.unique(layer.data[3:5, 0:5]) == 2\n assert np.unique(layer.data[0:5, 3:5]) == 2\n assert np.unique(layer.data[:3, :3]) == 1\n\n\[email protected](\n \"brush_shape, expected_sum\",\n [(\"circle\", [41, 137, 137, 41, 349]), (\"square\", [36, 144, 169, 36, 400])],\n)\ndef test_paint_2d(brush_shape, expected_sum):\n \"\"\"Test painting labels with circle/square brush.\"\"\"\n data = np.zeros((40, 40))\n layer = Labels(data)\n layer.brush_size = 12\n layer.brush_shape = brush_shape\n layer.mode = 'paint'\n layer.paint((0, 0), 3)\n\n layer.brush_size = 12\n layer.paint((15, 8), 4)\n\n layer.brush_size = 13\n layer.paint((30.2, 7.8), 5)\n\n layer.brush_size = 12\n layer.paint((39, 39), 6)\n\n layer.brush_size = 20\n layer.paint((15, 27), 7)\n\n assert np.sum(layer.data[:8, :8] == 3) == expected_sum[0]\n assert np.sum(layer.data[9:22, 2:15] == 4) == expected_sum[1]\n assert np.sum(layer.data[24:37, 2:15] == 5) == expected_sum[2]\n assert np.sum(layer.data[33:, 33:] == 6) == expected_sum[3]\n assert np.sum(layer.data[5:26, 17:38] == 7) == expected_sum[4]\n\n\[email protected](\n \"brush_shape, expected_sum\",\n [(\"circle\", [137, 1189, 1103]), (\"square\", [144, 1728, 1548])],\n)\ndef test_paint_3d(brush_shape, expected_sum):\n \"\"\"Test painting labels with circle/square brush on 3D image.\"\"\"\n data = np.zeros((30, 40, 40))\n layer = Labels(data)\n layer.brush_size = 12\n layer.brush_shape = brush_shape\n layer.mode = 'paint'\n\n # Paint in 2D\n layer.paint((10, 10, 10), 3)\n\n # Paint in 3D\n layer.n_dimensional = True\n 
layer.paint((10, 25, 10), 4)\n\n # Paint in 3D, preserve labels\n layer.n_dimensional = True\n layer.preserve_labels = True\n layer.paint((10, 15, 15), 5)\n\n assert np.sum(layer.data[4:17, 4:17, 4:17] == 3) == expected_sum[0]\n assert np.sum(layer.data[4:17, 19:32, 4:17] == 4) == expected_sum[1]\n assert np.sum(layer.data[4:17, 9:32, 9:32] == 5) == expected_sum[2]\n\n\ndef test_fill():\n \"\"\"Test filling labels with different brush sizes.\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n data[:10, :10] = 2\n data[:5, :5] = 1\n layer = Labels(data)\n assert np.unique(layer.data[:5, :5]) == 1\n assert np.unique(layer.data[5:10, 5:10]) == 2\n\n layer.fill([0, 0], 3)\n assert np.unique(layer.data[:5, :5]) == 3\n assert np.unique(layer.data[5:10, 5:10]) == 2\n\n\ndef test_value():\n \"\"\"Test getting the value of the data at the current coordinates.\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n layer = Labels(data)\n value = layer.get_value()\n assert layer.coordinates == (0, 0)\n assert value == data[0, 0]\n\n\ndef test_message():\n \"\"\"Test converting value and coords to message.\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(10, 15))\n layer = Labels(data)\n msg = layer.get_message()\n assert type(msg) == str\n\n\ndef test_thumbnail():\n \"\"\"Test the image thumbnail for square data.\"\"\"\n np.random.seed(0)\n data = np.random.randint(20, size=(30, 30))\n layer = Labels(data)\n layer._update_thumbnail()\n assert layer.thumbnail.shape == layer._thumbnail_shape\n\n\ndef test_world_data_extent():\n \"\"\"Test extent after applying transforms.\"\"\"\n np.random.seed(0)\n shape = (6, 10, 15)\n data = np.random.randint(20, size=(shape))\n layer = Labels(data)\n extent = np.array(((0,) * 3, np.subtract(shape, 1)))\n check_layer_world_data_extent(layer, extent, (3, 1, 1), (10, 20, 5))\n",
"import os\nimport sys\n\nimport numpy as np\nimport pytest\n\n\ndef test_multiscale(make_test_viewer):\n \"\"\"Test rendering of multiscale data.\"\"\"\n viewer = make_test_viewer()\n\n shapes = [(4000, 3000), (2000, 1500), (1000, 750), (500, 375)]\n np.random.seed(0)\n data = [np.random.random(s) for s in shapes]\n _ = viewer.add_image(data, multiscale=True, contrast_limits=[0, 1])\n layer = viewer.layers[0]\n\n # Set canvas size to target amount\n viewer.window.qt_viewer.view.canvas.size = (800, 600)\n viewer.window.qt_viewer.on_draw(None)\n\n # Check that current level is first large enough to fill the canvas with\n # a greater than one pixel depth\n assert layer.data_level == 2\n\n # Check that full field of view is currently requested\n assert np.all(layer.corner_pixels[0] <= [0, 0])\n assert np.all(layer.corner_pixels[1] >= np.subtract(shapes[2], 1))\n\n # Test value at top left corner of image\n viewer.cursor.position = (0, 0)\n value = layer.get_value()\n np.testing.assert_allclose(value, (2, data[2][(0, 0)]))\n\n # Test value at bottom right corner of image\n viewer.cursor.position = (3995, 2995)\n value = layer.get_value()\n np.testing.assert_allclose(value, (2, data[2][(999, 749)]))\n\n # Test value outside image\n viewer.cursor.position = (4000, 3000)\n value = layer.get_value()\n assert value[1] is None\n\n\ndef test_3D_multiscale_image(make_test_viewer):\n \"\"\"Test rendering of 3D multiscale image uses lowest resolution.\"\"\"\n viewer = make_test_viewer()\n\n data = [np.random.random((128,) * 3), np.random.random((64,) * 3)]\n viewer.add_image(data)\n\n # Check that this doesn't crash.\n viewer.dims.ndisplay = 3\n\n # Check lowest resolution is used\n assert viewer.layers[0].data_level == 1\n\n # Note that draw command must be explicitly triggered in our tests\n viewer.window.qt_viewer.on_draw(None)\n\n\[email protected](\n sys.platform.startswith('win') or not os.getenv(\"CI\"),\n reason='Screenshot tests are not supported on napari windows 
CI.',\n)\ndef test_multiscale_screenshot(make_test_viewer):\n \"\"\"Test rendering of multiscale data with screenshot.\"\"\"\n viewer = make_test_viewer(show=True)\n\n shapes = [(4000, 3000), (2000, 1500), (1000, 750), (500, 375)]\n data = [np.ones(s) for s in shapes]\n _ = viewer.add_image(data, multiscale=True, contrast_limits=[0, 1])\n\n # Set canvas size to target amount\n viewer.window.qt_viewer.view.canvas.size = (800, 600)\n\n screenshot = viewer.screenshot(canvas_only=True)\n center_coord = np.round(np.array(screenshot.shape[:2]) / 2).astype(np.int)\n target_center = np.array([255, 255, 255, 255], dtype='uint8')\n target_edge = np.array([0, 0, 0, 255], dtype='uint8')\n screen_offset = 3 # Offset is needed as our screenshots have black borders\n\n np.testing.assert_allclose(screenshot[tuple(center_coord)], target_center)\n np.testing.assert_allclose(\n screenshot[screen_offset, screen_offset], target_edge\n )\n np.testing.assert_allclose(\n screenshot[-screen_offset, -screen_offset], target_edge\n )\n\n\[email protected](\n sys.platform.startswith('win') or not os.getenv(\"CI\"),\n reason='Screenshot tests are not supported on napari windows CI.',\n)\ndef test_multiscale_screenshot_zoomed(make_test_viewer):\n \"\"\"Test rendering of multiscale data with screenshot after zoom.\"\"\"\n viewer = make_test_viewer(show=True)\n view = viewer.window.qt_viewer\n\n shapes = [(4000, 3000), (2000, 1500), (1000, 750), (500, 375)]\n data = [np.ones(s) for s in shapes]\n _ = viewer.add_image(data, multiscale=True, contrast_limits=[0, 1])\n\n # Set canvas size to target amount\n view.view.canvas.size = (800, 600)\n\n # Set zoom of camera to show highest resolution tile\n view.view.camera.rect = [1000, 1000, 200, 150]\n viewer.window.qt_viewer.on_draw(None)\n\n # Check that current level is bottom level of multiscale\n assert viewer.layers[0].data_level == 0\n\n screenshot = viewer.screenshot(canvas_only=True)\n center_coord = np.round(np.array(screenshot.shape[:2]) / 
2).astype(np.int)\n target_center = np.array([255, 255, 255, 255], dtype='uint8')\n screen_offset = 3 # Offset is needed as our screenshots have black borders\n\n np.testing.assert_allclose(screenshot[tuple(center_coord)], target_center)\n np.testing.assert_allclose(\n screenshot[screen_offset, screen_offset], target_center\n )\n np.testing.assert_allclose(\n screenshot[-screen_offset, -screen_offset], target_center\n )\n\n\[email protected](\n sys.platform.startswith('win') or not os.getenv(\"CI\"),\n reason='Screenshot tests are not supported on napari windows CI.',\n)\ndef test_image_screenshot_zoomed(make_test_viewer):\n \"\"\"Test rendering of image data with screenshot after zoom.\"\"\"\n viewer = make_test_viewer(show=True)\n view = viewer.window.qt_viewer\n\n data = np.ones((4000, 3000))\n _ = viewer.add_image(data, multiscale=False, contrast_limits=[0, 1])\n\n # Set canvas size to target amount\n view.view.canvas.size = (800, 600)\n\n # Set zoom of camera to show highest resolution tile\n view.view.camera.rect = [1000, 1000, 200, 150]\n viewer.window.qt_viewer.on_draw(None)\n\n screenshot = viewer.screenshot(canvas_only=True)\n center_coord = np.round(np.array(screenshot.shape[:2]) / 2).astype(np.int)\n target_center = np.array([255, 255, 255, 255], dtype='uint8')\n screen_offset = 3 # Offset is needed as our screenshots have black borders\n\n np.testing.assert_allclose(screenshot[tuple(center_coord)], target_center)\n np.testing.assert_allclose(\n screenshot[screen_offset, screen_offset], target_center\n )\n np.testing.assert_allclose(\n screenshot[-screen_offset, -screen_offset], target_center\n )\n\n\[email protected](\n sys.platform.startswith('win') or not os.getenv(\"CI\"),\n reason='Screenshot tests are not supported on napari windows CI.',\n)\ndef test_5D_multiscale(make_test_viewer):\n \"\"\"Test 5D multiscale data.\"\"\"\n # Show must be true to trigger multiscale draw and corner estimation\n viewer = make_test_viewer(show=True)\n shapes = [(1, 
2, 5, 20, 20), (1, 2, 5, 10, 10), (1, 2, 5, 5, 5)]\n np.random.seed(0)\n data = [np.random.random(s) for s in shapes]\n layer = viewer.add_image(data, multiscale=True)\n assert layer.data == data\n assert layer.multiscale is True\n assert layer.ndim == len(shapes[0])\n"
] |
[
[
"numpy.allclose",
"numpy.random.seed",
"numpy.unique",
"numpy.subtract",
"numpy.all",
"numpy.array",
"numpy.zeros",
"numpy.sum",
"numpy.random.randint"
],
[
"numpy.random.random",
"numpy.random.seed",
"numpy.subtract",
"numpy.ones",
"numpy.all",
"numpy.testing.assert_allclose",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
simon-donne/defusr
|
[
"fa4275070af4024eea128e99d7c6df2358d129a5",
"fa4275070af4024eea128e99d7c6df2358d129a5"
] |
[
"code/losses/depth_gt.py",
"code/utils/chamfer_evaluation.py"
] |
[
"\nimport torch\nimport numpy as np\n\nclass DepthGTLoss(torch.nn.Module):\n \"\"\"\n A simple L1 loss, but restricted to the cropped center of the image.\n It also does not count pixels outside of a given range of values (in target).\n Additionally, there is also an L1 loss on the gradient.\n \"\"\"\n def __init__(self, crop_fraction=0.25, vmin=0, vmax=1, limit=10):\n \"\"\"\n The input should be (batch x channels x height x width).\n We L1-penalize the inner portion of the image,\n with crop_fraction cut off from all borders.\n\n Keyword arguments:\n crop_fraction -- fraction to cut off from all sides (defaults to 0.25)\n vmin -- minimal (GT!) value to supervise\n vmax -- maximal (GT!) value to supervise\n limit -- anything higher than this is wrong, and should be ignored\n \"\"\"\n super().__init__()\n\n self.crop_fraction = crop_fraction\n \"Cut-off fraction\"\n\n self.vmin = vmin\n \"Lower bound for valid target pixels\"\n\n self.vmax = vmax\n \"Upper bound for valid target pixels\"\n\n self.sobel_x = torch.nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1, bias=False)\n self.sobel_x.weight = torch.nn.Parameter(torch.from_numpy(np.array([[1, 0, -1],[2,0,-2],[1,0,-1]])/8.).float().unsqueeze(0).unsqueeze(0))\n self.sobel_y = torch.nn.Conv2d(1, 1, kernel_size=3, stride=1, padding=1, bias=False)\n self.sobel_y.weight = torch.nn.Parameter(torch.from_numpy(np.array([[1, 2, 1],[0,0,0],[-1,-2,-1]])/8.).float().unsqueeze(0).unsqueeze(0))\n\n gpu = torch.device('cuda')\n self.sobel_x = self.sobel_x.to(gpu)\n self.sobel_y = self.sobel_y.to(gpu)\n\n self.limit = limit\n\n\n def forward(self, input, target):\n height = input.size(2)\n heightcrop = int(height * self.crop_fraction)\n width = input.size(3)\n widthcrop = int(width * self.crop_fraction)\n\n if self.crop_fraction > 0:\n input_crop = input[:,:,heightcrop:height-heightcrop,widthcrop:width-widthcrop]\n target_crop = target[:,:,heightcrop:height-heightcrop,widthcrop:width-widthcrop]\n else:\n input_crop = input\n 
target_crop = target\n\n valid_mask = (target_crop.le(self.vmax) * target_crop.ge(self.vmin)).float()\n\n loss = torch.abs( (input_crop - target_crop) * valid_mask ).sum()\n loss = loss / valid_mask.sum().clamp(min=1)\n\n input_gradx = self.sobel_x(input_crop)\n input_grady = self.sobel_y(input_crop)\n\n target_gradx = self.sobel_x(target_crop)\n target_grady = self.sobel_y(target_crop)\n\n grad_maskx = self.sobel_x(valid_mask)\n grad_masky = self.sobel_y(valid_mask)\n grad_valid_mask = (grad_maskx.eq(0) * grad_masky.eq(0)).float()*valid_mask\n\n gradloss = torch.abs( (input_gradx - target_gradx) ) + torch.abs( (input_grady - target_grady) )\n gradloss = ( gradloss * grad_valid_mask ).sum()\n gradloss = gradloss / grad_valid_mask.sum().clamp(min=1)\n\n loss = loss + gradloss\n\n # if this loss value is not plausible, cap it (which will also not backprop gradients)\n if self.limit is not None and loss > self.limit:\n loss = torch.clamp(loss, max=self.limit)\n\n if loss.ne(loss).item():\n print(\"Nan loss!\")\n\n return loss\n",
"\"\"\"\nCreating the visibility masks for all of the scenes.\n\"\"\"\n\nimport os\nfrom datasets.DTU import DTUAdapter\nfrom datasets.unreal_DTU import UnrealDTUAdapter\nfrom datasets.flying_things import FlyingThingsAdapter\nimport numpy as np\nfrom utils.ply import load_pointcloud_ply, save_pointcloud_ply, load_pointcloud_ply_gipuma\nfrom utils.depth_maps import depth_map_to_point_cloud\nfrom utils.file_system import ensure_dir\nfrom utils.debug import save_gray_tensor, save_color_tensor\nfrom utils.timer import Timer\nfrom experiment_handler import ExperimentHandler\nimport cv2\nfrom utils.depth_map_visualization import color_depth_map, color_map_errors\nimport MYTH\nimport torch\n\nfrom tqdm import tqdm\n\ncount_threshold = 2\nsparsity = 0.5\nthreshold = 2.0\n\n# NOTE: the threadcount is hard-set to 1024 because it is used in some shared-memory allocations inside the sparsity kernel\n\ndef compile_sparsity_count_kernel():\n def sparsity_count_gpu(points, sparsity1, sparsity2):\n N = points.shape[1]\n counts1 = np.zeros((N,))\n counts2 = np.zeros((N,))\n if N == 0:\n return counts1, counts2\n points = torch.Tensor(points).cuda()\n counts1 = torch.Tensor(counts1).cuda()\n counts2 = torch.Tensor(counts2).cuda()\n\n MYTH.bruteforce_sparsity_count_gpu(points, counts1, counts2, N, sparsity1, sparsity2)\n\n torch.cuda.synchronize()\n\n counts1 = counts1.cpu().numpy()\n counts2 = counts2.cpu().numpy()\n\n return counts1, counts2\n return sparsity_count_gpu\n\ndef compile_distance_kernel():\n def distance_gpu(points_from, points_to):\n N = points_from.shape[1]\n M = points_to.shape[1]\n dists = np.zeros((N,))\n if N == 0:\n return dists\n if M == 0:\n dists.fill(np.inf)\n return dists\n\n points_from = torch.Tensor(points_from).cuda()\n points_to = torch.Tensor(points_to).cuda()\n dists = torch.Tensor(dists).cuda()\n\n MYTH.bruteforce_distance_gpu(points_from, points_to, dists, N, M)\n\n torch.cuda.synchronize()\n\n dists = np.sqrt(dists.cpu().numpy())\n\n return 
dists\n return distance_gpu\n\nsparsity_count = compile_sparsity_count_kernel()\ncloud_distance = compile_distance_kernel()\n\ndef filter_cloud(locs, cols, bb=None, visibility=None, voxel_size=None, count_threshold=0, actual_sparsity=None, actual_threshold=None, actual_count_threshold=None):\n if bb is not None:\n inside_bounds = np.all((locs >= bb[0][:,None]) * (locs < bb[1][:,None]), axis=0)\n locs = locs[:,inside_bounds]\n cols = cols[:,inside_bounds]\n \n if voxel_size is not None and visibility is not None:\n voxel_gridsize = np.array(visibility.shape).astype(np.int32).reshape(-1,1)\n\n # bounding box filtering\n voxs = ((locs - bb[0].reshape(-1,1)) / voxel_size).astype(np.int32)\n\n # visibility filtering\n visibility_lin = visibility.reshape(-1)\n voxs_lin = voxs[0] * voxel_gridsize[1] * voxel_gridsize[2] + voxs[1] * voxel_gridsize[2] + voxs[2]\n voxs_vis = visibility_lin[voxs_lin].reshape(-1) > 0\n locs = locs[:,voxs_vis]\n cols = cols[:,voxs_vis]\n\n # density filtering: making sure that we have roughly the same spatial density everywhere\n # we only do this probabilistically, for speed -- give each point a survival chance, then sample\n if actual_sparsity is None:\n actual_sparsity = sparsity\n if actual_threshold is None:\n actual_threshold = threshold\n if actual_count_threshold is None:\n actual_count_threshold = count_threshold\n\n counts_sparsity, counts_threshold = sparsity_count(locs, actual_sparsity, actual_threshold)\n survival = (np.random.rand(counts_sparsity.shape[0]) < 1/counts_sparsity) * (counts_threshold >= actual_count_threshold)\n locs = locs[:,survival]\n cols = cols[:,survival]\n\n return locs, cols\n"
] |
[
[
"torch.abs",
"torch.nn.Conv2d",
"torch.device",
"torch.clamp",
"numpy.array"
],
[
"torch.cuda.synchronize",
"torch.Tensor",
"numpy.all",
"numpy.random.rand",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wanghongsheng01/oneflow_cambricon
|
[
"187faaa2cb9ba995080ba22499b6219c2d36f0ac"
] |
[
"oneflow_cambricon-cambricon/oneflow/python/test/ops/test_layers_conv1d.py"
] |
[
"\"\"\"\nCopyright 2020 The OneFlow Authors. All rights reserved.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\nimport unittest\nimport os\nfrom collections import OrderedDict\n\nimport numpy as np\nimport oneflow as flow\nimport tensorflow as tf\nimport test_global_storage\nfrom test_util import GenArgList\n\ngpus = tf.config.experimental.list_physical_devices(\"GPU\")\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\n\ndef grouped_convolution1D(\n inputs, filters, padding, num_groups, strides=None, dilation_rate=None\n):\n # Split input and outputs along their last dimension\n input_list = tf.split(inputs, num_groups, axis=-1)\n filter_list = tf.split(filters, num_groups, axis=-1)\n output_list = []\n\n # Perform a normal convolution on each split of the input and filters\n for conv_idx, (input_tensor, filter_tensor) in enumerate(\n zip(input_list, filter_list)\n ):\n output_list.append(\n tf.nn.conv1d(\n input_tensor,\n filter_tensor,\n padding=\"VALID\",\n stride=[1, 1, 1],\n data_format=\"NWC\",\n )\n )\n # Concatenate ouptputs along their last dimentsion\n outputs = tf.concat(output_list, axis=-1)\n return outputs\n\n\ndef compare_with_tensorflow(\n test_case, device_type, x_shape, filters, kernel_size, groups\n):\n assert flow.is_valid_device_tag(device_type)\n\n flow.clear_default_session()\n func_config = flow.FunctionConfig()\n func_config.default_data_type(flow.float)\n\n @flow.global_function(type=\"train\", 
function_config=func_config)\n def ConvJob():\n with flow.scope.placement(device_type, \"0:0\"):\n x = flow.get_variable(\n \"x\",\n shape=x_shape,\n dtype=flow.float,\n initializer=flow.random_uniform_initializer(minval=0, maxval=100),\n trainable=True,\n )\n loss = flow.layers.conv1d(\n x,\n filters,\n kernel_size=kernel_size,\n strides=[1],\n padding=\"valid\",\n data_format=\"NCW\",\n dilation_rate=1,\n groups=groups,\n use_bias=False,\n kernel_initializer=flow.random_uniform_initializer(\n minval=0, maxval=100\n ),\n weight_name=\"conv1d_weight\",\n )\n weight_shape = (filters, x.shape[1] // groups, kernel_size)\n weight = flow.get_variable(\n name=\"conv1d_weight\",\n shape=weight_shape,\n dtype=flow.float,\n initializer=flow.random_uniform_initializer(minval=0, maxval=100),\n )\n flow.optimizer.SGD(\n flow.optimizer.PiecewiseConstantScheduler([], [1e-4]), momentum=0\n ).minimize(loss)\n\n flow.watch(x, test_global_storage.Setter(\"x\"))\n flow.watch_diff(x, test_global_storage.Setter(\"x_diff\"))\n flow.watch(weight, test_global_storage.Setter(\"weight\"))\n flow.watch_diff(weight, test_global_storage.Setter(\"weight_diff\"))\n flow.watch(loss, test_global_storage.Setter(\"loss\"))\n flow.watch_diff(loss, test_global_storage.Setter(\"loss_diff\"))\n\n return loss\n\n # OneFlow\n of_out = ConvJob().get()\n\n # TensorFlow\n with tf.GradientTape(persistent=True) as tape:\n x = tf.Variable(test_global_storage.Get(\"x\").transpose(0, 2, 1))\n assert groups > 0\n assert x_shape[1] % groups == 0\n assert filters % groups == 0\n if groups == 1:\n weight = tf.Variable(test_global_storage.Get(\"weight\").transpose(2, 1, 0))\n tf_out = tf.nn.conv1d(\n x, weight, stride=[1, 1, 1], padding=\"VALID\", data_format=\"NWC\"\n )\n else:\n weight = tf.Variable(test_global_storage.Get(\"weight\").transpose(2, 1, 0))\n tf_out = grouped_convolution1D(\n x, weight, padding=\"VALID\", num_groups=groups\n )\n\n loss_diff = test_global_storage.Get(\"loss_diff\").transpose(0, 2, 1)\n 
tf_x_diff = tape.gradient(tf_out, x, loss_diff)\n tf_weight_diff = tape.gradient(tf_out, weight, loss_diff)\n\n of_out_np = of_out.numpy().transpose(0, 2, 1)\n tf_out_np = tf_out.numpy()\n max_abs_diff = np.max(np.absolute(of_out_np - tf_out_np))\n fail_info = \"\\nshape (of vs. tf): {} vs. {}\\nmax_abs_diff: {}\".format(\n of_out_np.shape, tf_out_np.shape, max_abs_diff\n )\n test_case.assertTrue(\n np.allclose(of_out_np, tf_out_np, rtol=1e-5, atol=1e-5), fail_info\n )\n\n of_x_diff_arr = test_global_storage.Get(\"x_diff\").transpose(0, 2, 1)\n tf_x_diff_arr = tf_x_diff.numpy()\n max_abs_diff = np.max(np.abs(of_x_diff_arr - tf_x_diff_arr))\n\n test_case.assertTrue(\n np.allclose(of_x_diff_arr, tf_x_diff_arr, rtol=1e-5, atol=1e-4)\n )\n test_case.assertTrue(\n np.allclose(\n test_global_storage.Get(\"weight_diff\").transpose(2, 1, 0),\n tf_weight_diff.numpy(),\n rtol=1e-5,\n atol=1e-5,\n )\n )\n\n\[email protected]_unless_1n1d()\nclass TestLayersConv1d(flow.unittest.TestCase):\n def test_conv1(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device_type\"] = [\"gpu\"]\n arg_dict[\"x_shape\"] = [(10, 32, 20)]\n arg_dict[\"filters\"] = [64]\n arg_dict[\"kernel_size\"] = [3]\n arg_dict[\"groups\"] = [32]\n for arg in GenArgList(arg_dict):\n compare_with_tensorflow(test_case, *arg)\n\n def test_conv2(test_case):\n arg_dict = OrderedDict()\n arg_dict[\"device_type\"] = [\"gpu\", \"cpu\"]\n arg_dict[\"x_shape\"] = [(10, 32, 20)]\n arg_dict[\"filters\"] = [32]\n arg_dict[\"kernel_size\"] = [3, 2]\n arg_dict[\"groups\"] = [1]\n for arg in GenArgList(arg_dict):\n compare_with_tensorflow(test_case, *arg)\n\n\nif __name__ == \"__main__\":\n unittest.main()\n"
] |
[
[
"numpy.absolute",
"tensorflow.concat",
"numpy.allclose",
"numpy.abs",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.split",
"tensorflow.nn.conv1d",
"tensorflow.GradientTape"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
}
] |
Jo951128/2021-2-MIP
|
[
"511e0a38816d16fdba9631f76cf913ba51c43138"
] |
[
"datasets.py"
] |
[
"import torch\nfrom torch.utils.data import Dataset \nimport cv2\nimport numpy as np \nimport pandas as pd\n\n__all__ = ['VideoDataset', 'VideoLabelDataset']\n\nclass VideoDataset(Dataset):\n \"\"\" Video Dataset for loading video.\n It will output only path of video (neither video file path or video folder path). \n However, you can load video as torch.Tensor (C x L x H x W).\n See below for an example of how to read video as torch.Tensor.\n Your video dataset can be image frames or video files.\n Args:\n csv_file (str): path fo csv file which store path of video file or video folder.\n the format of csv_file should like:\n \n # example_video_file.csv (if the videos of dataset is saved as video file)\n path\n ~/path/to/video/file1.mp4\n ~/path/to/video/file2.mp4\n ~/path/to/video/file3.mp4\n ~/path/to/video/file4.mp4\n # example_video_folder.csv (if the videos of dataset is saved as image frames)\n \n path\n ~/path/to/video/folder1/\n ~/path/to/video/folder2/\n ~/path/to/video/folder3/\n ~/path/to/video/folder4/\n Example:\n if the videos of dataset is saved as video file\n >>> import torch\n >>> from datasets import VideoDataset\n >>> import transforms\n >>> dataset = VideoDataset(\n >>> \"example_video_file.csv\",\n >>> transform = transforms.VideoFilePathToTensor() # See more options at transforms.py\n >>> )\n >>> data_loader = torch.utils.data.DataLoader(dataset, batch_size = 1, shuffle = True)\n >>> for videos in data_loader:\n >>> print(videos.size())\n if the video of dataset is saved as frames in video folder\n The tree like: (The names of the images are arranged in ascending order of frames)\n ~/path/to/video/folder1\n ├── frame-001.jpg\n ├── frame-002.jpg\n ├── frame-003.jpg\n └── frame-004.jpg\n >>> import torch\n >>> from datasets import VideoDataset\n >>> import transforms\n >>> dataset = VideoDataset(\n >>> \"example_video_folder.csv\",\n >>> transform = transforms.VideoFolderPathToTensor() # See more options at transforms.py\n >>> )\n >>> 
data_loader = torch.utils.data.DataLoader(dataset, batch_size = 1, shuffle = True)\n >>> for videos in data_loader:\n >>> print(videos.size())\n \"\"\"\n def __init__(self, csv_file, transform=None):\n self.dataframe = pd.read_csv(csv_file)\n self.transform = transform \n\n def __len__(self):\n \"\"\"\n Returns:\n int: number of rows of the csv file (not include the header).\n \"\"\"\n return len(self.dataframe)\n\n def __getitem__(self, index):\n \"\"\" get a video \"\"\"\n video = self.dataframe.iloc[index].path\n if self.transform:\n video = self.transform(video)\n return video\n\n\nclass VideoLabelDataset(Dataset):\n \"\"\" Dataset Class for Loading Video.\n It will output path and label. However, you can load video as torch.Tensor (C x L x H x W).\n See below for an example of how to read video as torch.Tensor.\n You can load tensor from video file or video folder by using the same way as VideoDataset.\n Args:\n csv_file (str): path fo csv file which store path and label of video file (or video folder).\n the format of csv_file should like:\n \n path, label\n ~/path/to/video/file1.mp4, 0\n ~/path/to/video/file2.mp4, 1\n ~/path/to/video/file3.mp4, 0\n ~/path/to/video/file4.mp4, 2\n Example:\n >>> import torch\n >>> import transforms\n >>> dataset = VideoDataset(\n >>> \"example_video_file_with_label.csv\",\n >>> transform = transforms.VideoFilePathToTensor() # See more options at transforms.py\n >>> )\n >>> data_loader = torch.utils.data.DataLoader(dataset, batch_size = 1, shuffle = True)\n >>> for videos, labels in data_loader:\n >>> print(videos.size())\n \"\"\"\n def __init__(self, csv_file, transform=None):\n self.dataframe = pd.read_csv(csv_file)\n self.transform = transform \n\n def __len__(self):\n \"\"\"\n Returns:\n int: number of rows of the csv file (not include the header).\n \"\"\"\n return len(self.dataframe)\n\n def __getitem__(self, index):\n \"\"\" get a video and its label \"\"\"\n video = self.dataframe.iloc[index].path\n label = 
self.dataframe.iloc[index].label \n if self.transform:\n\n video = self.transform(video)\n\n \n\n return video, label\n\n\nif __name__ == '__main__':\n import torchvision\n import PIL \n import transforms\n\n # test for VideoDataset\n dataset = VideoDataset(\n './data/example_video_file.csv', \n )\n path = dataset[0]\n print(path)\n\n test_loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True)\n for video in test_loader:\n print(video)\n \n # test for VideoLabelDataset\n dataset = VideoLabelDataset(\n './data/example_video_file_with_label.csv', \n transform=torchvision.transforms.Compose([\n transforms.VideoFilePathToTensor(max_len=50, fps=10, padding_mode='last'),\n transforms.VideoRandomCrop([512, 512]),\n transforms.VideoResize([256, 256]),\n ]) \n )\n video, label = dataset[0]\n print(video.size(), label)\n frame1 = torchvision.transforms.ToPILImage()(video[:, 29, :, :])\n frame2 = torchvision.transforms.ToPILImage()(video[:, 39, :, :])\n frame1.show()\n frame2.show()\n\n test_loader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True)\n \n for videos, labels in test_loader:\n print(videos.size(), label)\n"
] |
[
[
"pandas.read_csv",
"torch.utils.data.DataLoader"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
jleni/lola
|
[
"9b9a2122aefc97d9ed1529b875912816f1acb5d6"
] |
[
"lola/utils.py"
] |
[
"\"\"\"\nVarious utility functions.\n\"\"\"\nimport numpy as np\nimport tensorflow as tf\n\n\ndef batch_to_seq(h, nbatch, nsteps, flat=False):\n if flat:\n h = tf.reshape(h, [nbatch, nsteps])\n else:\n h = tf.reshape(h, [nbatch, nsteps, -1])\n return [tf.squeeze(v, [1]) for v in tf.split(axis=1, num_or_size_splits=nsteps, value=h)]\n\n\ndef seq_to_batch(h, flat = False):\n shape = h[0].get_shape().as_list()\n if not flat:\n assert(len(shape) > 1)\n nh = h[0].get_shape()[-1].value\n return tf.reshape(tf.concat(axis=1, values=h), [-1, nh])\n else:\n return tf.reshape(tf.stack(values=h, axis=1), [-1])\n\n\ndef lstm(xs, s, scope, nh, init_scale=1.0):\n nbatch, nin = [v.value for v in xs[0].get_shape()]\n nsteps = len(xs)\n with tf.variable_scope(scope):\n wx = tf.get_variable(\"wx\", [nin, nh*4], initializer=ortho_init(init_scale))\n wh = tf.get_variable(\"wh\", [nh, nh*4], initializer=ortho_init(init_scale))\n b = tf.get_variable(\"b\", [nh*4], initializer=tf.constant_initializer(0.0))\n\n c, h = tf.split(axis=1, num_or_size_splits=2, value=s)\n for idx, x in enumerate(xs):\n c = c\n h = h\n z = tf.matmul(x, wx) + tf.matmul(h, wh) + b\n i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)\n i = tf.nn.sigmoid(i)\n f = tf.nn.sigmoid(f)\n o = tf.nn.sigmoid(o)\n u = tf.tanh(u)\n c = f*c + i*u\n h = o*tf.tanh(c)\n xs[idx] = h\n s = tf.concat(axis=1, values=[c, h])\n return xs, s\n\n\ndef ortho_init(scale=1.0):\n def _ortho_init(shape, dtype, partition_info=None):\n #lasagne ortho init for tf\n shape = tuple(shape)\n if len(shape) == 2:\n flat_shape = shape\n elif len(shape) == 4: # assumes NHWC\n flat_shape = (np.prod(shape[:-1]), shape[-1])\n else:\n raise NotImplementedError\n a = np.random.normal(0.0, 1.0, flat_shape)\n u, _, v = np.linalg.svd(a, full_matrices=False)\n q = u if u.shape == flat_shape else v # pick the one with the correct shape\n q = q.reshape(shape)\n return (scale * q[:shape[0], :shape[1]]).astype(np.float32)\n return _ortho_init\n\n\ndef 
get_session():\n return tf.get_default_session()\n\n\ndef var_shape(x):\n out = x.get_shape().as_list()\n return out\n\n\ndef intprod(x):\n return int(np.prod(x))\n\n\ndef numel(x):\n return intprod(var_shape(x))\n\n\ndef flatgrad(loss, var_list, clip_norm=None):\n grads = tf.gradients(loss, var_list)\n if clip_norm is not None:\n grads = [tf.clip_by_norm(grad, clip_norm=clip_norm) for grad in grads]\n return tf.concat(axis=0, values=[\n tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])\n for (v, grad) in zip(var_list, grads)\n ])\n\n\nclass SetFromFlat(object):\n def __init__(self, var_list, dtype=tf.float32):\n assigns = []\n shapes = list(map(var_shape, var_list))\n total_size = np.sum([intprod(shape) for shape in shapes])\n\n self.theta = theta = tf.placeholder(dtype, [total_size])\n start = 0\n assigns = []\n for (shape, v) in zip(shapes, var_list):\n size = intprod(shape)\n assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))\n start += size\n self.op = tf.group(*assigns)\n\n def __call__(self, theta):\n get_session().run(self.op, feed_dict={self.theta: theta})\n\n\nclass GetFlat(object):\n def __init__(self, var_list):\n self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])\n\n def __call__(self):\n return get_session().run(self.op)\n\n\ndef get_monte_carlo(reward, y, trace_length, batch_size):\n reward = np.reshape(reward, ((batch_size, trace_length)))\n reward_buffer = np.zeros(((batch_size, trace_length+1)))\n reward_buffer[:, :trace_length] = reward\n discounted_reward = np.zeros(((batch_size, trace_length)))\n\n for t in range(trace_length-1, -1, -1):\n reward_buffer[:,t+1:] *= y\n discounted_reward[:,t] = np.sum(reward_buffer[:,t:],1)\n\n return np.reshape(discounted_reward,(batch_size *trace_length))\n\n\ndef make_cube(trace_length):\n cube = tf.Variable(tf.zeros([trace_length, trace_length, trace_length]))\n\n cube_ops = []\n for i in range(trace_length):\n cube_ops.append(cube[i, 
:(i+1), :(i+1)].assign(tf.ones([i+1, i+1])))\n return cube, cube_ops\n"
] |
[
[
"tensorflow.concat",
"tensorflow.zeros",
"tensorflow.stack",
"tensorflow.tanh",
"tensorflow.group",
"numpy.linalg.svd",
"numpy.reshape",
"tensorflow.gradients",
"tensorflow.squeeze",
"tensorflow.clip_by_norm",
"numpy.zeros",
"tensorflow.matmul",
"tensorflow.nn.sigmoid",
"tensorflow.placeholder",
"tensorflow.zeros_like",
"tensorflow.split",
"numpy.sum",
"tensorflow.get_default_session",
"tensorflow.reshape",
"tensorflow.ones",
"tensorflow.constant_initializer",
"numpy.random.normal",
"numpy.prod",
"tensorflow.variable_scope"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
alibaba/EasyRec
|
[
"436205f8480fa131d4b6e9d166b3ab85bd6b9d9d",
"436205f8480fa131d4b6e9d166b3ab85bd6b9d9d"
] |
[
"easy_rec/python/test/export_test.py",
"easy_rec/python/tools/criteo/convert_data.py"
] |
[
"# -*- encoding:utf-8 -*-\n# Copyright (c) Alibaba, Inc. and its affiliates.\n# Date: 2020-10-06\n# Filename:export_test.py\nimport functools\nimport json\nimport logging\nimport os\nimport unittest\n\nimport numpy as np\nimport tensorflow as tf\n\nimport easy_rec\nfrom easy_rec.python.inference.predictor import Predictor\nfrom easy_rec.python.utils import config_util\nfrom easy_rec.python.utils import test_utils\nfrom easy_rec.python.utils.test_utils import RunAsSubprocess\n\nif tf.__version__ >= '2.0':\n gfile = tf.compat.v1.gfile\nelse:\n gfile = tf.gfile\n\n\nclass ExportTest(tf.test.TestCase):\n\n def setUp(self):\n logging.info('Testing %s.%s' % (type(self).__name__, self._testMethodName))\n\n def tearDown(self):\n test_utils.set_gpu_id(None)\n\n @RunAsSubprocess\n def _predict_and_check(self,\n data_path,\n saved_model_dir,\n cmp_result,\n keys=['probs'],\n separator=',',\n tol=1e-4):\n predictor = Predictor(saved_model_dir)\n with open(data_path, 'r') as fin:\n inputs = []\n for line_str in fin:\n line_str = line_str.strip()\n if len(predictor.input_names) > 1:\n inputs.append(line_str.split(separator))\n else:\n inputs.append(line_str)\n output_res = predictor.predict(inputs, batch_size=32)\n\n for i in range(len(output_res)):\n for key in keys:\n val0 = output_res[i][key]\n val1 = cmp_result[i][key]\n diff = np.max(np.abs(val0 - val1))\n assert diff < tol, \\\n 'too much difference: %.6f for %s, tol=%.6f' \\\n % (diff, key, tol)\n\n def _extract_data(self, input_path, output_path, offset=1, separator=','):\n with open(input_path, 'r') as fin:\n with open(output_path, 'w') as fout:\n for line_str in fin:\n line_str = line_str.strip()\n line_toks = line_str.split(separator)\n if offset > 0:\n line_toks = line_toks[offset:]\n fout.write('%s\\n' % (separator.join(line_toks)))\n\n def _extract_rtp_data(self, input_path, output_path, separator=';'):\n with open(input_path, 'r') as fin:\n with open(output_path, 'w') as fout:\n for line_str in fin:\n line_str = 
line_str.strip()\n line_toks = line_str.split(separator)\n fout.write('%s\\n' % line_toks[-1])\n\n def test_multi_tower(self):\n self._export_test('samples/model_config/multi_tower_export.config',\n self._extract_data)\n\n def test_filter_input(self):\n self._export_test('samples/model_config/export_filter_input.config',\n self._extract_data)\n\n def test_mmoe(self):\n self._export_test(\n 'samples/model_config/mmoe_on_taobao.config',\n functools.partial(self._extract_data, offset=2),\n keys=['probs_ctr', 'probs_cvr'])\n\n def test_fg(self):\n self._export_test(\n 'samples/model_config/taobao_fg.config',\n self._extract_rtp_data,\n separator='\u0002')\n\n def test_fg_export(self):\n self._export_test(\n 'samples/model_config/taobao_fg_export.config',\n self._extract_rtp_data,\n separator='\u0002',\n test_multi=False)\n\n def test_export_with_asset(self):\n pipeline_config_path = 'samples/model_config/taobao_fg.config'\n test_dir = test_utils.get_tmp_dir()\n # prepare model\n self.assertTrue(\n test_utils.test_single_train_eval(\n pipeline_config_path, test_dir=test_dir))\n test_utils.set_gpu_id(None)\n config_path = os.path.join(test_dir, 'pipeline.config')\n export_dir = os.path.join(test_dir, 'export/')\n export_cmd = \"\"\"\n python -m easy_rec.python.export\n --pipeline_config_path %s\n --export_dir %s\n --asset_files fg.json:samples/model_config/taobao_fg.json\n \"\"\" % (\n config_path,\n export_dir,\n )\n proc = test_utils.run_cmd(export_cmd,\n '%s/log_%s.txt' % (test_dir, 'export'))\n proc.wait()\n self.assertTrue(proc.returncode == 0)\n files = gfile.Glob(export_dir + '*')\n export_dir = files[0]\n assert gfile.Exists(export_dir + '/assets/taobao_fg.json')\n assert gfile.Exists(export_dir + '/assets/pipeline.config')\n\n def test_export_with_out_in_ckpt_config(self):\n test_dir = test_utils.get_tmp_dir()\n logging.info('test dir: %s' % test_dir)\n\n pipeline_config_path = 'samples/model_config/mmoe_on_taobao.config'\n\n def 
_post_check_func(pipeline_config):\n ckpt_path = tf.train.latest_checkpoint(pipeline_config.model_dir)\n export_dir = os.path.join(test_dir, 'train/export/no_config')\n export_cmd = \"\"\"\n python -m easy_rec.python.export\n --pipeline_config_path %s\n --checkpoint_path %s\n --export_dir %s\n \"\"\" % (pipeline_config_path, ckpt_path, export_dir)\n proc = test_utils.run_cmd(export_cmd,\n '%s/log_%s.txt' % (test_dir, 'export'))\n proc.wait()\n return proc.returncode == 0\n\n # prepare model\n self.assertTrue(\n test_utils.test_single_train_eval(\n pipeline_config_path,\n test_dir=test_dir,\n post_check_func=_post_check_func))\n\n def test_multi_class_predict(self):\n self._export_test('samples/model_config/deepfm_multi_cls_on_avazu_ctr.config',\n extract_data_func=self._extract_data,\n keys=['probs', 'logits', 'probs_y', 'logits_y', 'y'])\n\n def _export_test(self,\n pipeline_config_path,\n extract_data_func=None,\n separator=',',\n keys=['probs'],\n test_multi=True):\n test_dir = test_utils.get_tmp_dir()\n logging.info('test dir: %s' % test_dir)\n\n # prepare model\n self.assertTrue(\n test_utils.test_single_train_eval(\n pipeline_config_path, test_dir=test_dir))\n test_utils.set_gpu_id(None)\n\n # prepare two version config\n config_path_single = os.path.join(test_dir, 'pipeline.config')\n config_path_multi = os.path.join(test_dir, 'pipeline_v2.config')\n pipeline_config = config_util.get_configs_from_pipeline_file(\n config_path_single)\n if pipeline_config.export_config.multi_placeholder:\n config_path_single, config_path_multi = config_path_multi, config_path_single\n pipeline_config.export_config.multi_placeholder =\\\n not pipeline_config.export_config.multi_placeholder\n config_util.save_pipeline_config(pipeline_config, test_dir,\n 'pipeline_v2.config')\n\n # prepare two version export dir\n export_dir_single = os.path.join(test_dir, 'train/export/final')\n export_dir_multi = os.path.join(test_dir, 'train/export/multi')\n export_cmd = \"\"\"\n python -m 
easy_rec.python.export\n --pipeline_config_path %s\n --export_dir %s\n \"\"\" % (config_path_multi, export_dir_multi)\n proc = test_utils.run_cmd(export_cmd,\n '%s/log_%s.txt' % (test_dir, 'export'))\n proc.wait()\n self.assertTrue(proc.returncode == 0)\n\n # use checkpoint to prepare result\n result_path = os.path.join(test_dir, 'result.txt')\n predict_cmd = \"\"\"\n python -m easy_rec.python.predict\n --pipeline_config_path %s\n --output_path %s\n \"\"\" % (config_path_single, result_path)\n proc = test_utils.run_cmd(predict_cmd % (),\n '%s/log_%s.txt' % (test_dir, 'predict'))\n proc.wait()\n self.assertTrue(proc.returncode == 0)\n with open(result_path, 'r') as fin:\n cmp_result = []\n for line_str in fin:\n line_str = line_str.strip()\n cmp_result.append(json.loads(line_str))\n\n test_data_path = pipeline_config.eval_input_path\n if extract_data_func is not None:\n tmp_data_path = os.path.join(test_dir, 'pred_input_data')\n extract_data_func(test_data_path, tmp_data_path)\n test_data_path = tmp_data_path\n self._predict_and_check(\n test_data_path,\n export_dir_single,\n cmp_result,\n keys=keys,\n separator=separator)\n if test_multi:\n self._predict_and_check(\n test_data_path,\n export_dir_multi,\n cmp_result,\n keys=keys,\n separator=separator)\n test_utils.clean_up(test_dir)\n\n def _test_big_model_export(self,\n pipeline_config_path,\n test_data_path,\n extract_data_func=None,\n total_steps=50):\n test_dir = test_utils.get_tmp_dir()\n logging.info('test dir: %s' % test_dir)\n\n lookup_op_path = os.path.join(easy_rec.ops_dir, 'libembed_op.so')\n tf.load_op_library(lookup_op_path)\n\n # prepare model\n self.assertTrue(\n test_utils.test_single_train_eval(\n pipeline_config_path, test_dir=test_dir, total_steps=total_steps))\n\n test_utils.set_gpu_id(None)\n # the pipeline.config is produced by the prepare model cmd\n config_path = os.path.join(test_dir, 'pipeline.config')\n export_dir = os.path.join(test_dir, 'export/')\n export_cmd = \"\"\"\n python -m 
easy_rec.python.export\n --pipeline_config_path %s\n --export_dir %s\n --asset_files %s\n --redis_url %s\n --redis_passwd %s\n --redis_threads 1\n --redis_write_kv 1\n --verbose 1\n \"\"\" % (config_path, export_dir, test_data_path, os.environ['redis_url'],\n os.environ['redis_passwd'])\n proc = test_utils.run_cmd(export_cmd,\n '%s/log_%s.txt' % (test_dir, 'export'))\n proc.wait()\n self.assertTrue(proc.returncode == 0)\n\n export_dir = gfile.Glob(export_dir + '[0-9][0-9][0-9]*')[0]\n _, test_data_name = os.path.split(test_data_path)\n assert gfile.Exists(export_dir + '/assets/' + test_data_name)\n\n # use checkpoint to prepare result\n result_path = os.path.join(test_dir, 'result.txt')\n predict_cmd = \"\"\"\n python -m easy_rec.python.predict\n --pipeline_config_path %s\n --input_path %s\n --output_path %s\n \"\"\" % (config_path, test_data_path, result_path)\n proc = test_utils.run_cmd(predict_cmd % (),\n '%s/log_%s.txt' % (test_dir, 'predict'))\n proc.wait()\n self.assertTrue(proc.returncode == 0)\n with open(result_path, 'r') as fin:\n cmp_result = []\n for line_str in fin:\n line_str = line_str.strip()\n cmp_result.append(json.loads(line_str))\n\n if extract_data_func is not None:\n tmp_data_path = os.path.join(test_dir, 'pred_input_data')\n extract_data_func(test_data_path, tmp_data_path)\n test_data_path = tmp_data_path\n self._predict_and_check(test_data_path, export_dir, cmp_result)\n\n @unittest.skipIf(\n 'redis_url' not in os.environ,\n 'Only execute when redis is available: redis_url, redis_passwd')\n def test_big_model_export(self):\n pipeline_config_path = 'samples/model_config/multi_tower_export.config'\n test_data_path = 'data/test/export/data.csv'\n self._test_big_model_export(\n pipeline_config_path,\n test_data_path,\n extract_data_func=self._extract_data)\n\n @unittest.skipIf(\n 'redis_url' not in os.environ,\n 'Only execute when redis is available: redis_url, redis_passwd')\n def test_big_model_deepfm_export(self):\n pipeline_config_path = 
'samples/model_config/deepfm_combo_on_avazu_ctr.config'\n test_data_path = 'data/test/dwd_avazu_ctr_deepmodel_10w.csv'\n self._test_big_model_export(\n pipeline_config_path,\n test_data_path,\n extract_data_func=self._extract_data)\n\n @unittest.skipIf(\n 'redis_url' not in os.environ,\n 'Only execute when redis is available: redis_url, redis_passwd')\n def test_big_model_din_export(self):\n pipeline_config_path = 'samples/model_config/din_on_taobao.config'\n test_data_path = 'data/test/tb_data/taobao_test_data'\n self._test_big_model_export(\n pipeline_config_path,\n test_data_path,\n extract_data_func=functools.partial(self._extract_data, offset=2))\n\n @unittest.skipIf(\n 'redis_url' not in os.environ,\n 'Only execute when redis is available: redis_url, redis_passwd')\n def test_big_model_wide_and_deep_export(self):\n pipeline_config_path = 'samples/model_config/wide_and_deep_two_opti.config'\n test_data_path = 'data/test/dwd_avazu_ctr_deepmodel_10w.csv'\n self._test_big_model_export(\n pipeline_config_path,\n test_data_path,\n extract_data_func=functools.partial(self._extract_data))\n\n @unittest.skipIf(\n 'redis_url' not in os.environ or '-PAI' not in tf.__version__,\n 'Only execute when pai-tf and redis is available: redis_url, redis_passwd'\n )\n def test_big_model_embedding_variable_export(self):\n pipeline_config_path = 'samples/model_config/taobao_fg_ev.config'\n test_data_path = 'data/test/rtp/taobao_valid_feature.txt'\n self._test_big_model_export(\n pipeline_config_path,\n test_data_path,\n self._extract_rtp_data,\n total_steps=1000)\n\n @unittest.skipIf(\n 'oss_endpoint' not in os.environ or 'oss_ak' not in os.environ or\n 'oss_sk' not in os.environ or 'oss_path' not in os.environ or\n '-PAI' not in tf.__version__,\n 'Only execute oss params(oss_endpoint,oss_ak,oss_sk) are specified,'\n 'and pai-tf is available.')\n def test_big_model_embedding_variable_oss_export(self):\n pipeline_config_path = 'samples/model_config/taobao_fg_ev.config'\n 
test_data_path = 'data/test/rtp/taobao_valid_feature.txt'\n self._test_big_model_export_to_oss(\n pipeline_config_path,\n test_data_path,\n self._extract_rtp_data,\n total_steps=100)\n\n @unittest.skipIf(\n 'oss_endpoint' not in os.environ or 'oss_ak' not in os.environ or\n 'oss_sk' not in os.environ or 'oss_path' not in os.environ or\n '-PAI' not in tf.__version__,\n 'Only execute oss params(oss_endpoint,oss_ak,oss_sk) are specified,'\n 'and pai-tf is available.')\n def test_big_model_embedding_variable_v2_oss_export(self):\n pipeline_config_path = 'samples/model_config/taobao_fg_ev_v2.config'\n test_data_path = 'data/test/rtp/taobao_valid_feature.txt'\n self._test_big_model_export_to_oss(\n pipeline_config_path,\n test_data_path,\n self._extract_rtp_data,\n total_steps=1000)\n\n def _test_big_model_export_to_oss(self,\n pipeline_config_path,\n test_data_path,\n extract_data_func=None,\n total_steps=50):\n test_dir = test_utils.get_tmp_dir()\n logging.info('test dir: %s' % test_dir)\n\n lookup_op_path = os.path.join(easy_rec.ops_dir, 'libembed_op.so')\n tf.load_op_library(lookup_op_path)\n\n # prepare model\n self.assertTrue(\n test_utils.test_single_train_eval(\n pipeline_config_path, test_dir=test_dir, total_steps=total_steps))\n\n test_utils.set_gpu_id(None)\n # the pipeline.config is produced by the prepare model cmd\n config_path = os.path.join(test_dir, 'pipeline.config')\n export_dir = os.path.join(test_dir, 'export/')\n export_cmd = \"\"\"\n python -m easy_rec.python.export\n --pipeline_config_path %s\n --export_dir %s\n --asset_files %s\n --oss_path %s\n --oss_endpoint %s\n --oss_ak %s --oss_sk %s\n --oss_threads 5\n --oss_timeout 10\n --oss_write_kv 1\n --verbose 1\n \"\"\" % (config_path, export_dir, test_data_path, os.environ['oss_path'],\n os.environ['oss_endpoint'], os.environ['oss_ak'],\n os.environ['oss_sk'])\n proc = test_utils.run_cmd(export_cmd,\n '%s/log_%s.txt' % (test_dir, 'export'))\n proc.wait()\n self.assertTrue(proc.returncode == 0)\n\n 
export_dir = gfile.Glob(export_dir + '[0-9][0-9][0-9]*')[0]\n _, test_data_name = os.path.split(test_data_path)\n assert gfile.Exists(export_dir + '/assets/' + test_data_name)\n\n # use checkpoint to prepare result\n result_path = os.path.join(test_dir, 'result.txt')\n predict_cmd = \"\"\"\n python -m easy_rec.python.predict\n --pipeline_config_path %s\n --input_path %s\n --output_path %s\n \"\"\" % (config_path, test_data_path, result_path)\n proc = test_utils.run_cmd(predict_cmd % (),\n '%s/log_%s.txt' % (test_dir, 'predict'))\n proc.wait()\n self.assertTrue(proc.returncode == 0)\n with open(result_path, 'r') as fin:\n cmp_result = []\n for line_str in fin:\n line_str = line_str.strip()\n cmp_result.append(json.loads(line_str))\n\n if extract_data_func is not None:\n tmp_data_path = os.path.join(test_dir, 'pred_input_data')\n extract_data_func(test_data_path, tmp_data_path)\n test_data_path = tmp_data_path\n self._predict_and_check(test_data_path, export_dir, cmp_result)\n\n @unittest.skipIf(\n 'oss_path' not in os.environ,\n 'Only execute when oss is available: oss_path, oss_endpoint, oss_ak, oss_sk'\n )\n def test_big_model_export_to_oss(self):\n pipeline_config_path = 'samples/model_config/multi_tower_export.config'\n test_data_path = 'data/test/export/data.csv'\n self._test_big_model_export_to_oss(\n pipeline_config_path,\n test_data_path,\n extract_data_func=self._extract_data)\n\n @unittest.skipIf(\n 'oss_path' not in os.environ,\n 'Only execute when oss is available: oss_path, oss_endpoint, oss_ak, oss_sk'\n )\n def test_big_model_deepfm_export_to_oss(self):\n pipeline_config_path = 'samples/model_config/deepfm_combo_on_avazu_ctr.config'\n test_data_path = 'data/test/dwd_avazu_ctr_deepmodel_10w.csv'\n self._test_big_model_export_to_oss(\n pipeline_config_path,\n test_data_path,\n extract_data_func=self._extract_data)\n\n @unittest.skipIf(\n 'oss_path' not in os.environ,\n 'Only execute when oss is available: oss_path, oss_endpoint, oss_ak, oss_sk'\n )\n 
def test_big_model_din_export_to_oss(self):\n pipeline_config_path = 'samples/model_config/din_on_taobao.config'\n test_data_path = 'data/test/tb_data/taobao_test_data'\n self._test_big_model_export_to_oss(\n pipeline_config_path,\n test_data_path,\n extract_data_func=functools.partial(self._extract_data, offset=2))\n\n @unittest.skipIf(\n 'oss_path' not in os.environ,\n 'Only execute when oss is available: oss_path, oss_endpoint, oss_ak, oss_sk'\n )\n def test_big_model_wide_and_deep_export_to_oss(self):\n pipeline_config_path = 'samples/model_config/wide_and_deep_two_opti.config'\n test_data_path = 'data/test/dwd_avazu_ctr_deepmodel_10w.csv'\n self._test_big_model_export_to_oss(\n pipeline_config_path,\n test_data_path,\n extract_data_func=functools.partial(self._extract_data))\n\n\nif __name__ == '__main__':\n tf.test.main()\n",
"# -*- encoding:utf-8 -*-\n# Copyright (c) Alibaba, Inc. and its affiliates.\nimport argparse\nimport gzip\nimport logging\nimport multiprocessing\nimport os\nimport sys\nimport traceback\n\nimport numpy as np\nimport six\nfrom tensorflow.python.platform import gfile\n\nlogging.basicConfig(\n level=logging.INFO, format='[%(asctime)s][%(levelname)s] %(message)s')\n\n\ndef save_np_bin(labels, dense_arr, cate_arr, prefix):\n with gfile.GFile(prefix + '_label.bin', 'wb') as fout:\n fout.write(np.array(labels, dtype=np.int32).tobytes())\n with gfile.GFile(prefix + '_dense.bin', 'wb') as fout:\n fout.write(np.array(dense_arr, dtype=np.float32).tobytes())\n with gfile.GFile(prefix + '_category.bin', 'wb') as fout:\n fout.write(np.array(cate_arr, dtype=np.float32).tobytes())\n\n\ndef convert(input_path, prefix, part_record_num):\n logging.info('start to convert %s, part_record_num=%d' %\n (input_path, part_record_num))\n batch_size = part_record_num\n labels = np.zeros([batch_size], dtype=np.int32)\n dense_arr = np.zeros([batch_size, 13], dtype=np.float32)\n cate_arr = np.zeros([batch_size, 26], dtype=np.uint32)\n part_id = 0\n total_line = 0\n try:\n sid = 0\n with gfile.GFile(input_path, 'rb') as gz_fin:\n for line_str in gzip.GzipFile(fileobj=gz_fin, mode='rb'):\n if six.PY3:\n line_str = str(line_str, 'utf-8')\n line_str = line_str.strip()\n line_toks = line_str.split('\\t')\n labels[sid] = int(line_toks[0])\n\n for j in range(1, 14):\n x = line_toks[j]\n dense_arr[sid, j - 1] = float(x) if x != '' else 0.0\n\n for j in range(14, 40):\n x = line_toks[j]\n cate_arr[sid, j - 14] = int(x, 16) if x != '' else 0\n\n sid += 1\n if sid == batch_size:\n save_np_bin(labels, dense_arr, cate_arr, prefix + '_' + str(part_id))\n part_id += 1\n total_line += sid\n sid = 0\n logging.info('\\t%s write part: %d' % (input_path, part_id - 1))\n if sid > 0:\n save_np_bin(labels[:sid], dense_arr[:sid], cate_arr[:sid],\n prefix + '_' + str(part_id))\n part_id += 1\n total_line += sid\n 
except Exception as ex:\n logging.error('convert %s failed: %s' % (input_path, str(ex)))\n logging.error(traceback.format_exc())\n return\n logging.info('done convert %s, total_line=%d, part_num=%d' %\n (input_path, total_line, part_id))\n\n\nif __name__ == '__main__':\n \"\"\"Convert criteo 1T data to binary format.\n\n The outputs are stored in multiple parts, each with at most part_record_num samples.\n Each part consists of 3 files:\n xxx_yyy_label.bin,\n xxx_yyy_dense.bin,\n xxx_yyy_category.bin,\n xxx is in range [0-23], range of yyy is determined by part_record_num,\n\n If part_record_num is set to the default value 8M, there will be 535 parts. We convert\n the data on machine with 64GB memory, if you memory is limited, you can convert the .gz\n files one by one, or you can set a small part_record_num.\n \"\"\"\n\n parser = argparse.ArgumentParser()\n parser.add_argument(\n '--input_dir', type=str, default=None, help='criteo 1t data dir')\n parser.add_argument(\n '--save_dir',\n type=str,\n default=None,\n help='criteo binary data output dir ')\n parser.add_argument(\n '--part_record_num',\n type=int,\n default=1024 * 1024 * 8,\n help='the maximal number of samples in each binary file')\n parser.add_argument(\n '--dt',\n nargs='*',\n type=int,\n help='select days to convert, default to select all: 0-23')\n\n args = parser.parse_args()\n\n assert args.input_dir, 'input_dir is not set'\n assert args.save_dir, 'save_dir is not set'\n\n save_dir = args.save_dir\n if not save_dir.endswith('/'):\n save_dir = save_dir + '/'\n if not gfile.IsDirectory(save_dir):\n gfile.MakeDirs(save_dir)\n\n if args.dt is None or len(args.dt) == 0:\n days = list(range(0, 24))\n else:\n days = list(args.dt)\n\n proc_arr = []\n for d in days:\n input_path = os.path.join(args.input_dir, 'day_%d.gz' % d)\n prefix = os.path.join(args.save_dir, str(d))\n proc = multiprocessing.Process(\n target=convert, args=(input_path, prefix, args.part_record_num))\n proc.start()\n 
proc_arr.append(proc)\n for proc in proc_arr:\n proc.join()\n"
] |
[
[
"tensorflow.train.latest_checkpoint",
"numpy.abs",
"tensorflow.load_op_library",
"tensorflow.test.main"
],
[
"tensorflow.python.platform.gfile.GFile",
"tensorflow.python.platform.gfile.MakeDirs",
"tensorflow.python.platform.gfile.IsDirectory",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
wanglxi1/tensorflow
|
[
"08ed32dbb9e8f67eec9efce3807b5bdb3933eb2f",
"f9e536b4a9d96322d7e971073602c8969dbd9369",
"08ed32dbb9e8f67eec9efce3807b5bdb3933eb2f",
"b7acb6abe0d88c674e8d149ae744b4c994434da6",
"08ed32dbb9e8f67eec9efce3807b5bdb3933eb2f"
] |
[
"tensorflow/tools/dist_test/python/mnist_replica.py",
"tensorflow/python/framework/common_shapes.py",
"tensorflow/python/training/monitored_session.py",
"tensorflow/contrib/linalg/python/ops/linear_operator_composition.py",
"tensorflow/contrib/rnn/python/tools/checkpoint_convert.py"
] |
[
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"Distributed MNIST training and validation, with model replicas.\n\nA simple softmax model with one hidden layer is defined. The parameters\n(weights and biases) are located on one parameter server (ps), while the ops\nare executed on two worker nodes by default. The TF sessions also run on the \nworker node.\nMultiple invocations of this script can be done in parallel, with different\nvalues for --task_index. There should be exactly one invocation with\n--task_index, which will create a master session that carries out variable\ninitialization. The other, non-master, sessions will wait for the master\nsession to finish the initialization before proceeding to the training stage.\n\nThe coordination between the multiple worker invocations occurs due to\nthe definition of the parameters on the same ps devices. The parameter updates\nfrom one worker is visible to all other workers. 
As such, the workers can\nperform forward computation and gradient calculation in parallel, which\nshould lead to increased training speed for the simple model.\n\"\"\"\n\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport math\nimport sys\nimport tempfile\nimport time\n\nimport tensorflow as tf\nfrom tensorflow.examples.tutorials.mnist import input_data\n\n\nflags = tf.app.flags\nflags.DEFINE_string(\"data_dir\", \"/tmp/mnist-data\",\n \"Directory for storing mnist data\")\nflags.DEFINE_boolean(\"download_only\", False,\n \"Only perform downloading of data; Do not proceed to \"\n \"session preparation, model definition or training\")\nflags.DEFINE_integer(\"task_index\", None,\n \"Worker task index, should be >= 0. task_index=0 is \"\n \"the master worker task the performs the variable \"\n \"initialization \")\nflags.DEFINE_integer(\"num_gpus\", 1,\n \"Total number of gpus for each machine.\"\n \"If you don't use GPU, please set it to '0'\")\nflags.DEFINE_integer(\"replicas_to_aggregate\", None,\n \"Number of replicas to aggregate before parameter update\"\n \"is applied (For sync_replicas mode only; default: \"\n \"num_workers)\")\nflags.DEFINE_integer(\"hidden_units\", 100,\n \"Number of units in the hidden layer of the NN\")\nflags.DEFINE_integer(\"train_steps\", 200,\n \"Number of (global) training steps to perform\")\nflags.DEFINE_integer(\"batch_size\", 100, \"Training batch size\")\nflags.DEFINE_float(\"learning_rate\", 0.01, \"Learning rate\")\nflags.DEFINE_boolean(\"sync_replicas\", False,\n \"Use the sync_replicas (synchronized replicas) mode, \"\n \"wherein the parameter updates from workers are aggregated \"\n \"before applied to avoid stale gradients\")\nflags.DEFINE_boolean(\n \"existing_servers\", False, \"Whether servers already exists. If True, \"\n \"will use the worker hosts via their GRPC URLs (one client process \"\n \"per worker host). 
Otherwise, will create an in-process TensorFlow \"\n \"server.\")\nflags.DEFINE_string(\"ps_hosts\",\"localhost:2222\",\n \"Comma-separated list of hostname:port pairs\")\nflags.DEFINE_string(\"worker_hosts\", \"localhost:2223,localhost:2224\",\n \"Comma-separated list of hostname:port pairs\")\nflags.DEFINE_string(\"job_name\", None,\"job name: worker or ps\")\n\nFLAGS = flags.FLAGS\n\n\nIMAGE_PIXELS = 28\n\n\ndef main(unused_argv):\n mnist = input_data.read_data_sets(FLAGS.data_dir, one_hot=True)\n if FLAGS.download_only:\n sys.exit(0)\n\n if FLAGS.job_name is None or FLAGS.job_name == \"\":\n raise ValueError(\"Must specify an explicit `job_name`\")\n if FLAGS.task_index is None or FLAGS.task_index ==\"\":\n raise ValueError(\"Must specify an explicit `task_index`\")\n\n print(\"job name = %s\" % FLAGS.job_name)\n print(\"task index = %d\" % FLAGS.task_index)\n\n #Construct the cluster and start the server\n ps_spec = FLAGS.ps_hosts.split(\",\")\n worker_spec = FLAGS.worker_hosts.split(\",\")\n\n # Get the number of workers.\n num_workers = len(worker_spec)\n\n cluster = tf.train.ClusterSpec({\n \"ps\": ps_spec,\n \"worker\": worker_spec})\n\n if not FLAGS.existing_servers:\n # Not using existing servers. 
Create an in-process server.\n server = tf.train.Server(\n cluster, job_name=FLAGS.job_name, task_index=FLAGS.task_index)\n if FLAGS.job_name == \"ps\":\n server.join()\n\n is_chief = (FLAGS.task_index == 0)\n if FLAGS.num_gpus > 0:\n if FLAGS.num_gpus < num_workers:\n raise ValueError(\"number of gpus is less than number of workers\")\n # Avoid gpu allocation conflict: now allocate task_num -> #gpu \n # for each worker in the corresponding machine\n gpu = (FLAGS.task_index % FLAGS.num_gpus)\n worker_device = \"/job:worker/task:%d/gpu:%d\" % (FLAGS.task_index, gpu)\n elif FLAGS.num_gpus == 0:\n # Just allocate the CPU to worker server\n cpu = 0\n worker_device = \"/job:worker/task:%d/cpu:%d\" % (FLAGS.task_index, cpu)\n # The device setter will automatically place Variables ops on separate\n # parameter servers (ps). The non-Variable ops will be placed on the workers.\n # The ps use CPU and workers use corresponding GPU\n with tf.device(\n tf.train.replica_device_setter(\n worker_device=worker_device,\n ps_device=\"/job:ps/cpu:0\",\n cluster=cluster)):\n global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n\n # Variables of the hidden layer\n hid_w = tf.Variable(\n tf.truncated_normal(\n [IMAGE_PIXELS * IMAGE_PIXELS, FLAGS.hidden_units],\n stddev=1.0 / IMAGE_PIXELS),\n name=\"hid_w\")\n hid_b = tf.Variable(tf.zeros([FLAGS.hidden_units]), name=\"hid_b\")\n\n # Variables of the softmax layer\n sm_w = tf.Variable(\n tf.truncated_normal(\n [FLAGS.hidden_units, 10],\n stddev=1.0 / math.sqrt(FLAGS.hidden_units)),\n name=\"sm_w\")\n sm_b = tf.Variable(tf.zeros([10]), name=\"sm_b\")\n\n # Ops: located on the worker specified with FLAGS.task_index\n x = tf.placeholder(tf.float32, [None, IMAGE_PIXELS * IMAGE_PIXELS])\n y_ = tf.placeholder(tf.float32, [None, 10])\n\n hid_lin = tf.nn.xw_plus_b(x, hid_w, hid_b)\n hid = tf.nn.relu(hid_lin)\n\n y = tf.nn.softmax(tf.nn.xw_plus_b(hid, sm_w, sm_b))\n cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 
1e-10, 1.0)))\n\n opt = tf.train.AdamOptimizer(FLAGS.learning_rate)\n\n if FLAGS.sync_replicas:\n if FLAGS.replicas_to_aggregate is None:\n replicas_to_aggregate = num_workers\n else:\n replicas_to_aggregate = FLAGS.replicas_to_aggregate\n\n opt = tf.train.SyncReplicasOptimizer(\n opt,\n replicas_to_aggregate=replicas_to_aggregate,\n total_num_replicas=num_workers,\n name=\"mnist_sync_replicas\")\n\n train_step = opt.minimize(cross_entropy, global_step=global_step)\n\n if FLAGS.sync_replicas:\n local_init_op = opt.local_step_init_op\n if is_chief:\n local_init_op = opt.chief_init_op\n\n ready_for_local_init_op = opt.ready_for_local_init_op\n\n # Initial token and chief queue runners required by the sync_replicas mode\n chief_queue_runner = opt.get_chief_queue_runner()\n sync_init_op = opt.get_init_tokens_op()\n\n init_op = tf.global_variables_initializer()\n train_dir = tempfile.mkdtemp()\n\n if FLAGS.sync_replicas:\n sv = tf.train.Supervisor(\n is_chief=is_chief,\n logdir=train_dir,\n init_op=init_op,\n local_init_op=local_init_op,\n ready_for_local_init_op=ready_for_local_init_op,\n recovery_wait_secs=1,\n global_step=global_step)\n else:\n sv = tf.train.Supervisor(\n is_chief=is_chief,\n logdir=train_dir,\n init_op=init_op,\n recovery_wait_secs=1,\n global_step=global_step)\n\n sess_config = tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=False,\n device_filters=[\"/job:ps\", \"/job:worker/task:%d\" % FLAGS.task_index])\n\n # The chief worker (task_index==0) session will prepare the session,\n # while the remaining workers will wait for the preparation to complete.\n if is_chief:\n print(\"Worker %d: Initializing session...\" % FLAGS.task_index)\n else:\n print(\"Worker %d: Waiting for session to be initialized...\" %\n FLAGS.task_index)\n\n if FLAGS.existing_servers:\n server_grpc_url = \"grpc://\" + worker_spec[FLAGS.task_index]\n print(\"Using existing server at: %s\" % server_grpc_url)\n\n sess = 
sv.prepare_or_wait_for_session(server_grpc_url,\n config=sess_config)\n else:\n sess = sv.prepare_or_wait_for_session(server.target, config=sess_config)\n\n print(\"Worker %d: Session initialization complete.\" % FLAGS.task_index)\n\n if FLAGS.sync_replicas and is_chief:\n # Chief worker will start the chief queue runner and call the init op.\n sess.run(sync_init_op)\n sv.start_queue_runners(sess, [chief_queue_runner])\n\n # Perform training\n time_begin = time.time()\n print(\"Training begins @ %f\" % time_begin)\n\n local_step = 0\n while True:\n # Training feed\n batch_xs, batch_ys = mnist.train.next_batch(FLAGS.batch_size)\n train_feed = {x: batch_xs, y_: batch_ys}\n\n _, step = sess.run([train_step, global_step], feed_dict=train_feed)\n local_step += 1\n\n now = time.time()\n print(\"%f: Worker %d: training step %d done (global step: %d)\" %\n (now, FLAGS.task_index, local_step, step))\n\n if step >= FLAGS.train_steps:\n break\n\n time_end = time.time()\n print(\"Training ends @ %f\" % time_end)\n training_time = time_end - time_begin\n print(\"Training elapsed time: %f s\" % training_time)\n\n # Validation feed\n val_feed = {x: mnist.validation.images, y_: mnist.validation.labels}\n val_xent = sess.run(cross_entropy, feed_dict=val_feed)\n print(\"After %d training step(s), validation cross entropy = %g\" %\n (FLAGS.train_steps, val_xent))\n\n\nif __name__ == \"__main__\":\n tf.app.run()\n",
"# Copyright 2015 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A library of common shape functions.\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\nimport six.moves\n\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.framework import cpp_shape_inference_pb2\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.framework import tensor_util\n\n\ndef scalar_shape(unused_op):\n \"\"\"Shape function for ops that output a scalar value.\"\"\"\n return [tensor_shape.scalar()]\n\n\ndef unchanged_shape(op):\n \"\"\"Shape function for ops that output an tensor like their first input.\"\"\"\n return [op.inputs[0].get_shape()]\n\n\ndef unchanged_shape_with_rank(rank):\n \"\"\"Returns a shape function for ops that constrain the rank of their input.\n\n Args:\n rank: The exact rank of the input and output.\n\n Returns:\n A shape function for ops that output a tensor of the same size as their\n input, with a particular rank.\n \"\"\"\n\n def _ShapeFunction(op):\n return [op.inputs[0].get_shape().with_rank(rank)]\n\n return _ShapeFunction\n\n\ndef unchanged_shape_with_rank_at_least(rank):\n \"\"\"Returns a shape 
function for ops that constrain the rank of their input.\n\n Args:\n rank: A lower bound on the rank of the input and output.\n\n Returns:\n A shape function for ops that output a tensor of the same size as their\n input, with a particular rank.\n \"\"\"\n\n def _ShapeFunction(op):\n return [op.inputs[0].get_shape().with_rank_at_least(rank)]\n\n return _ShapeFunction\n\n\ndef unchanged_shape_with_rank_at_most(rank):\n \"\"\"Returns a shape function for ops that constrain the rank of their input.\n\n Args:\n rank: An upper bound on the rank of the input and output.\n\n Returns:\n A shape function for ops that output a tensor of the same size as their\n input, with a particular rank.\n \"\"\"\n\n def _ShapeFunction(op):\n return [op.inputs[0].get_shape().with_rank_at_most(rank)]\n\n return _ShapeFunction\n\n\ndef matmul_shape(op):\n \"\"\"Shape function for a MatMul op.\"\"\"\n a_shape = op.inputs[0].get_shape().with_rank(2)\n transpose_a = op.get_attr(\"transpose_a\")\n b_shape = op.inputs[1].get_shape().with_rank(2)\n transpose_b = op.get_attr(\"transpose_b\")\n output_rows = a_shape[1] if transpose_a else a_shape[0]\n output_cols = b_shape[0] if transpose_b else b_shape[1]\n inner_a = a_shape[0] if transpose_a else a_shape[1]\n inner_b = b_shape[1] if transpose_b else b_shape[0]\n inner_a.assert_is_compatible_with(inner_b)\n return [tensor_shape.TensorShape([output_rows, output_cols])]\n\n\ndef get_conv_output_size(input_size, filter_size, strides, padding_type):\n \"\"\"Returns the spatial size of a n-d convolution/pooling output.\"\"\"\n input_size = tuple([tensor_shape.as_dimension(x).value for x in input_size])\n filter_size = tuple([tensor_shape.as_dimension(x).value for x in filter_size])\n strides = [int(x) for x in strides]\n\n if all(x == 1 for x in input_size) and all(x == 1 for x in filter_size):\n return input_size\n\n if any(x is not None and y is not None and x > y for x, y in\n zip(filter_size, input_size)):\n raise ValueError(\"Filter must not be 
larger than the input: \"\n \"Filter: %r Input: %r\" % (filter_size, input_size))\n\n if padding_type == b\"VALID\":\n\n def _valid(in_dim, k_dim, s_dim):\n if in_dim is not None and k_dim is not None:\n return (in_dim - k_dim + s_dim) // s_dim\n else:\n return None\n\n output_size = [\n _valid(in_dim, k_dim, s_dim)\n for in_dim, k_dim, s_dim in zip(input_size, filter_size, strides)\n ]\n elif padding_type == b\"SAME\":\n\n def _same(in_dim, s_dim):\n if in_dim is not None:\n return (in_dim + s_dim - 1) // s_dim\n else:\n return None\n\n output_size = [_same(in_dim, s_dim)\n for in_dim, s_dim in zip(input_size, strides)]\n else:\n raise ValueError(\"Invalid padding: %r\" % padding_type)\n\n return tuple(output_size)\n\n\ndef get2d_conv_output_size(input_height, input_width, filter_height,\n filter_width, row_stride, col_stride, padding_type):\n \"\"\"Returns the number of rows and columns in a convolution/pooling output.\"\"\"\n return get_conv_output_size((input_height, input_width),\n (filter_height, filter_width),\n (row_stride, col_stride), padding_type)\n\n\ndef conv2d_shape(op):\n \"\"\"Shape function for a Conv2D op.\n\n This op has two inputs:\n\n * input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]\n * filter, a 4D tensor with shape = [filter_rows, filter_cols,\n depth_in, depth_out]\n\n The output is a 4D tensor with shape = [batch_size, out_rows,\n out_cols, depth_out], where out_rows and out_cols depend on the\n value of the op's \"padding\" and \"strides\" attrs.\n\n Args:\n op: A Conv2D Operation.\n\n Returns:\n A list containing the Shape of the Conv2D output.\n\n Raises:\n ValueError: If the shapes of the input or filter are incompatible.\n \"\"\"\n input_shape = op.inputs[0].get_shape().with_rank(4)\n filter_shape = op.inputs[1].get_shape().with_rank(4)\n\n try:\n data_format = op.get_attr(\"data_format\")\n except ValueError:\n data_format = None\n\n if data_format == b\"NCHW\":\n # Convert input shape to the default NHWC for 
inference.\n input_shape = [input_shape[0], input_shape[2], input_shape[3],\n input_shape[1]]\n\n batch_size = input_shape[0]\n in_rows = input_shape[1]\n in_cols = input_shape[2]\n\n filter_rows = filter_shape[0]\n filter_cols = filter_shape[1]\n depth_out = filter_shape[3]\n # Check that the input depths are compatible.\n input_shape[3].assert_is_compatible_with(filter_shape[2])\n\n if data_format == b\"NCHW\":\n stride_b, stride_d, stride_r, stride_c = op.get_attr(\"strides\")\n else:\n stride_b, stride_r, stride_c, stride_d = op.get_attr(\"strides\")\n\n if stride_b != 1 or stride_d != 1:\n raise ValueError(\"Current implementation does not yet support \"\n \"strides in the batch and depth dimensions.\")\n # TODO(mrry,shlens): Raise an error if the stride would cause\n # information in the input to be ignored. This will require a change\n # in the kernel implementation.\n padding = op.get_attr(\"padding\")\n out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,\n filter_cols, stride_r, stride_c,\n padding)\n\n output_shape = [batch_size, out_rows, out_cols, depth_out]\n if data_format == b\"NCHW\":\n # Convert output shape back to NCHW.\n output_shape = [output_shape[0], output_shape[3], output_shape[1],\n output_shape[2]]\n return [tensor_shape.TensorShape(output_shape)]\n\n\ndef depthwise_conv2d_native_shape(op):\n \"\"\"Shape function for a DepthwiseConv2D op.\n\n This op has two inputs:\n\n * input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]\n * filter, a 4D tensor with shape = [filter_rows, filter_cols,\n depth_in, depthwise_multiplier]\n\n The output is a 4D tensor with shape = [batch_size, out_rows,\n out_cols, depth_in*depthwise_multiplier], where out_rows and out_cols depend\n on the value of the op's \"padding\" and \"strides\" attrs.\n\n Args:\n op: A DepthwiseConv2dNative Operation.\n\n Returns:\n A list containing the Shape of the DepthwiseConv2DNative output.\n\n Raises:\n ValueError: If the shapes of the 
input or filter are incompatible.\n \"\"\"\n input_shape = op.inputs[0].get_shape().with_rank(4)\n filter_shape = op.inputs[1].get_shape().with_rank(4)\n\n batch_size = input_shape[0]\n in_rows = input_shape[1]\n in_cols = input_shape[2]\n\n filter_rows = filter_shape[0]\n filter_cols = filter_shape[1]\n depth_out = filter_shape[3] * filter_shape[2]\n # Check that the input depths are compatible.\n input_shape[3].assert_is_compatible_with(filter_shape[2])\n\n stride_b, stride_r, stride_c, stride_d = op.get_attr(\"strides\")\n if stride_b != 1 or stride_d != 1:\n raise ValueError(\"Current implementation does not yet support \"\n \"strides in the batch and depth dimensions.\")\n if stride_r != stride_c:\n # TODO(shlens): Add support for this.\n raise ValueError(\"Current implementation only supports equal length \"\n \"strides in the row and column dimensions.\")\n\n # TODO(mrry,shlens): Raise an error if the stride would cause\n # information in the input to be ignored. This will require a change\n # in the kernel implementation.\n stride = stride_r\n padding = op.get_attr(\"padding\")\n out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,\n filter_cols, stride, stride,\n padding)\n\n return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth_out])]\n\n\ndef separable_conv2d_shape(op):\n \"\"\"Shape function for a SeparableConv2D op.\n\n This op has three inputs:\n\n * input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]\n\n * depthwise_filter, a 4D tensor with shape = [filter_rows,\n filter_cols, depth_in, depth_multiplier]\n\n * pointwise_filter, a 4D tensor with shape = [1, 1, depth_in *\n depth_multiplier, depth_out]\n\n The output is a 4D tensor with shape = [batch_size, out_rows,\n out_cols, depth_out], where out_rows and out_cols depend on the\n value of the op's \"padding\" and \"strides\" attrs.\n\n Args:\n op: A SeparableConv2D Operation.\n\n Returns:\n A list containing the Shape of the SeparableConv2D 
output.\n\n Raises:\n ValueError: If the shapes of the input or filter are incompatible.\n \"\"\"\n input_shape = op.inputs[0].get_shape().with_rank(4)\n depthwise_filter_shape = op.inputs[1].get_shape().merge_with(\n tensor_shape.TensorShape([None, None, input_shape[3], None]))\n pointwise_depth_in = depthwise_filter_shape[2] * depthwise_filter_shape[3]\n\n pointwise_filter_shape = op.inputs[2].get_shape().merge_with(\n tensor_shape.TensorShape([1, 1, pointwise_depth_in, None]))\n\n batch_size = input_shape[0]\n in_rows = input_shape[1]\n in_cols = input_shape[2]\n\n filter_rows = depthwise_filter_shape[0]\n filter_cols = depthwise_filter_shape[1]\n depth_out = pointwise_filter_shape[3]\n\n stride_b, stride_r, stride_c, stride_d = op.get_attr(\"strides\")\n if stride_b != 1 or stride_d != 1:\n raise ValueError(\"Current implementation does not yet support \"\n \"strides in the batch and depth dimensions.\")\n if stride_r != stride_c:\n # TODO(shlens): Add support for this.\n raise ValueError(\"Current implementation only supports equal length \"\n \"strides in the row and column dimensions.\")\n\n # TODO(mrry,shlens): Raise an error if the stride would cause\n # information in the input to be ignored. 
This will require a change\n # in the kernel implementation.\n stride = stride_r\n padding = op.get_attr(\"padding\")\n out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, filter_rows,\n filter_cols, stride, stride,\n padding)\n\n return [tensor_shape.TensorShape([batch_size, out_rows, out_cols, depth_out])]\n\n\ndef avg_pool_shape(op):\n \"\"\"Shape function for an AvgPool op.\n\n This op has one input:\n\n * input, a 4D tensor with shape = [batch_size, rows, cols, depth]\n\n The output is a 4D tensor with shape = [batch_size, out_rows,\n out_cols, depth_out], where out_rows and out_cols depend on the\n value of the op's \"ksize\", \"strides\", and \"padding\" attrs.\n\n Args:\n op: An AvgPool Operation.\n\n Returns:\n A single-element list containing the Shape of the AvgPool output.\n\n Raises:\n ValueError: If the shape of the input is invalid or incompatible with\n the values of the attrs.\n \"\"\"\n input_shape = op.inputs[0].get_shape().with_rank(4)\n try:\n data_format = op.get_attr(\"data_format\")\n except ValueError:\n data_format = None\n\n if data_format == b\"NCHW\":\n # Convert input shape to the default NHWC for inference.\n input_shape = [input_shape[0], input_shape[2], input_shape[3],\n input_shape[1]]\n\n if data_format == b\"NCHW\":\n ksize_b, ksize_d, ksize_r, ksize_c = op.get_attr(\"ksize\")\n stride_b, stride_d, stride_r, stride_c = op.get_attr(\"strides\")\n else:\n ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr(\"ksize\")\n stride_b, stride_r, stride_c, stride_d = op.get_attr(\"strides\")\n\n batch_size = input_shape[0]\n in_rows = input_shape[1]\n in_cols = input_shape[2]\n depth = input_shape[3]\n\n if ksize_b != 1 or ksize_d != 1:\n raise ValueError(\"Current implementation does not support pooling \"\n \"in the batch and depth dimensions.\")\n if stride_b != 1 or stride_d != 1:\n raise ValueError(\"Current implementation does not support strides \"\n \"in the batch and depth dimensions.\")\n\n # TODO(mrry,shlens): Raise an 
error if the stride would cause\n # information in the input to be ignored. This will require a change\n # in the kernel implementation.\n padding = op.get_attr(\"padding\")\n\n out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, ksize_r,\n ksize_c, stride_r, stride_c,\n padding)\n\n output_shape = [batch_size, out_rows, out_cols, depth]\n if data_format == b\"NCHW\":\n # Convert output shape back to NCHW.\n output_shape = [output_shape[0], output_shape[3], output_shape[1],\n output_shape[2]]\n return [tensor_shape.TensorShape(output_shape)]\n\n\ndef max_pool_shape(op):\n \"\"\"Shape function for a MaxPool op.\n\n This op has one input:\n\n * input, a 4D tensor with shape = [batch_size, rows, cols, depth_in]\n\n The output is a 4D tensor with shape = [batch_size, out_rows,\n out_cols, depth_out], where out_rows, out_cols, and depth_out depend\n on the value of the op's \"ksize\", \"strides\", and \"padding\" attrs.\n\n Args:\n op: A MaxPool Operation.\n\n Returns:\n A single-element list containing the Shape of the MaxPool output.\n\n Raises:\n ValueError: If the shape of the input is invalid or incompatible with\n the values of the attrs.\n \"\"\"\n input_shape = op.inputs[0].get_shape().with_rank(4)\n try:\n data_format = op.get_attr(\"data_format\")\n except ValueError:\n data_format = None\n\n if data_format == b\"NCHW\":\n # Convert input shape to the default NHWC for inference.\n input_shape = [input_shape[0], input_shape[2], input_shape[3],\n input_shape[1]]\n\n if data_format == b\"NCHW\":\n ksize_b, ksize_d, ksize_r, ksize_c = op.get_attr(\"ksize\")\n stride_b, stride_d, stride_r, stride_c = op.get_attr(\"strides\")\n else:\n ksize_b, ksize_r, ksize_c, ksize_d = op.get_attr(\"ksize\")\n stride_b, stride_r, stride_c, stride_d = op.get_attr(\"strides\")\n\n batch_size = input_shape[0]\n in_rows = input_shape[1]\n in_cols = input_shape[2]\n depth = input_shape[3]\n\n if ksize_b != 1:\n raise ValueError(\"Current implementation does not support 
pooling \"\n \"in the batch dimension.\")\n if stride_b != 1:\n raise ValueError(\"Current implementation does not support strides \"\n \"in the batch dimension.\")\n\n if not ((ksize_r == 1 and ksize_c == 1) or ksize_d == 1):\n raise ValueError(\"MaxPooling supports exactly one of pooling across depth \"\n \"or pooling across width/height.\")\n\n # TODO(mrry,shlens): Raise an error if the stride would cause\n # information in the input to be ignored. This will require a change\n # in the kernel implementation.\n if ksize_d == 1:\n padding = op.get_attr(\"padding\")\n out_rows, out_cols = get2d_conv_output_size(in_rows, in_cols, ksize_r,\n ksize_c, stride_r, stride_c,\n padding)\n output_shape = [batch_size, out_rows, out_cols, depth]\n else:\n if depth % ksize_d > 0:\n raise ValueError(\"Depthwise max pooling requires the depth window \"\n \"to evenly divide the input depth.\")\n if stride_d != ksize_d:\n raise ValueError(\"Depthwise max pooling requires the depth window \"\n \"to equal the depth stride.\")\n output_shape = [batch_size, in_rows, in_cols, depth // ksize_d]\n\n if data_format == b\"NCHW\":\n # Convert output shape back to NCHW.\n output_shape = [output_shape[0], output_shape[3], output_shape[1],\n output_shape[2]]\n return [tensor_shape.TensorShape(output_shape)]\n\n\ndef no_outputs(unused_op):\n \"\"\"Shape function for use with ops that have no outputs.\"\"\"\n return []\n\n\ndef unknown_shape(op):\n \"\"\"Shape function for use with ops whose output shapes are unknown.\"\"\"\n return [tensor_shape.unknown_shape() for _ in op.outputs]\n\n\ndef broadcast_shape(shape_x, shape_y):\n \"\"\"Returns the broadcasted shape between `shape_x` and `shape_y`.\n\n Args:\n shape_x: A `TensorShape`\n shape_y: A `TensorShape`\n\n Returns:\n A `TensorShape` representing the broadcasted shape.\n\n Raises:\n ValueError: If the two shapes can not be broadcasted.\n \"\"\"\n if shape_x.ndims is None or shape_y.ndims is None:\n return tensor_shape.unknown_shape()\n\n # 
To compute the broadcasted dimensions, we zip together shape_x and shape_y,\n # and pad with 1 to make them the same length.\n broadcasted_dims = reversed(list(six.moves.zip_longest(\n reversed(shape_x.dims),\n reversed(shape_y.dims),\n fillvalue=tensor_shape.Dimension(1))))\n # Next we combine the dimensions according to the numpy broadcasting rules.\n # http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html\n return_dims = []\n for (dim_x, dim_y) in broadcasted_dims:\n if dim_x.value is None or dim_y.value is None:\n # One or both dimensions is unknown. If either dimension is greater than\n # 1, we assume that the program is correct, and the other dimension will\n # be broadcast to match it.\n # TODO(mrry): If we eliminate the shape checks in C++, we must still\n # assert that the unknown dim is either 1 or the same as the known dim.\n if dim_x.value is not None and dim_x.value > 1:\n return_dims.append(dim_x)\n elif dim_y.value is not None and dim_y.value > 1:\n return_dims.append(dim_y)\n else:\n return_dims.append(None)\n elif dim_x.value == 1:\n # We will broadcast dim_x to dim_y.\n return_dims.append(dim_y)\n elif dim_y.value == 1:\n # We will broadcast dim_y to dim_x.\n return_dims.append(dim_x)\n elif dim_x.value == dim_y.value:\n # The dimensions are compatible, so output is the same size in that\n # dimension.\n return_dims.append(dim_x.merge_with(dim_y))\n else:\n raise ValueError(\"Incompatible shapes for broadcasting: %s and %s\"\n % (shape_x, shape_y))\n return tensor_shape.TensorShape(return_dims)\n\n\ndef call_cpp_shape_fn(op, require_shape_fn=True):\n \"\"\"A shape function that delegates to the registered C++ shape function.\n\n Args:\n op: the node in the graph for which to compute output shapes.\n require_shape_fn: If true, and the C++ shape function is not registered\n in the current binary then an exception is raised; otherwise, if the\n C++ shape function is not registered then unknown_shape is used.\n\n Returns:\n A dictionary with 
the following keys:\n shapes: A TensorShape list of the output shapes of the op, as computed\n using the C++ shape inference function registered for the op.\n handle_shapes: A TensorShape list of the shapes for handle outputs, if\n any.\n handle_dtypes: A list of DataType enums for the handle outputs, if any.\n\n Raises:\n ValueError: If the C++ shape function returned an error (e.g. because the\n shapes of the inputs are of the wrong rank or otherwise incompatible\n according to the shape function).\n RuntimeError: If the C++ shape function is not registered and\n <require_shape_fn> is True.\n \"\"\"\n if op.type == \"Const\":\n # To avoid serializing large constants, we special-case constant\n # here, even though it has a C++ shape function. When Python\n # calls the C / C-API directly, we should be able to remove this.\n return {\n \"shapes\": [tensor_shape.TensorShape(op.get_attr(\"value\").tensor_shape)],\n \"handle_data\": [None]\n }\n\n input_tensors_needed = []\n input_tensors_as_shapes_needed = []\n\n while True:\n res = _call_cpp_shape_fn_impl(op, input_tensors_needed,\n input_tensors_as_shapes_needed,\n require_shape_fn)\n if not isinstance(res, dict):\n # Handles the case where _call_cpp_shape_fn_impl calls unknown_shape(op).\n return res\n\n # See if we need to evaluate some inputs.\n if not res[\"inputs_needed\"]:\n return res\n p = cpp_shape_inference_pb2.CppShapeInferenceInputsNeeded()\n p = p.FromString(res[\"inputs_needed\"])\n changed = False\n for idx in p.input_tensors_needed:\n if idx not in input_tensors_needed:\n input_tensors_needed.append(idx)\n changed = True\n for idx in p.input_tensors_as_shapes_needed:\n if idx not in input_tensors_as_shapes_needed:\n input_tensors_as_shapes_needed.append(idx)\n changed = True\n if not changed:\n return res\n\n\ndef _call_cpp_shape_fn_impl(\n op, input_tensors_needed, input_tensors_as_shapes_needed, require_shape_fn):\n \"\"\"Core implementaton of call_cpp_shape_fn.\"\"\"\n graph_def_version = 
op.graph.graph_def_versions.producer\n node_def_str = op.node_def.SerializeToString()\n\n def tensor_to_inference_result(t):\n r = cpp_shape_inference_pb2.CppShapeInferenceResult()\n r.shape.CopyFrom(t.get_shape().as_proto())\n # pylint: disable=protected-access\n if t._handle_data is not None:\n r.handle_data.CopyFrom(t._handle_data)\n # pylint: enable=protected-access\n return r.SerializeToString()\n input_shapes = [tensor_to_inference_result(i) for i in op.inputs]\n\n input_tensors = [None for i in input_shapes]\n for idx in input_tensors_needed:\n v = tensor_util.constant_value(op.inputs[idx])\n if v is not None:\n input_tensors[idx] = np.asarray(v)\n\n serialized_unknown_shape = (\n tensor_shape.TensorShape(None).as_proto().SerializeToString())\n arr = [serialized_unknown_shape for i in input_shapes]\n for idx in input_tensors_as_shapes_needed:\n s = tensor_util.constant_value_as_shape(op.inputs[idx])\n if s is not None:\n arr[idx] = s.as_proto().SerializeToString()\n input_tensors_as_shapes = arr\n\n missing_shape_fn = False\n try:\n with errors.raise_exception_on_not_ok_status() as status:\n output = pywrap_tensorflow.RunCppShapeInference(\n graph_def_version, node_def_str, input_shapes, input_tensors,\n input_tensors_as_shapes, status)\n except errors.InvalidArgumentError as err:\n if err.message.startswith(\"No shape inference function exists for op\"):\n missing_shape_fn = True\n else:\n raise ValueError(err.message)\n\n if missing_shape_fn:\n if require_shape_fn:\n raise RuntimeError(\n \"No C++ shape function registered for standard op: %s\" % op.type)\n return unknown_shape(op)\n\n output_shapes = output[:-1]\n\n # Convert TensorShapeProto values in output_shapes.\n result_protos = [\n cpp_shape_inference_pb2.CppShapeInferenceResult().FromString(s)\n for s in output_shapes\n ]\n result = [r.shape for r in result_protos]\n result_handle_data = [\n r.handle_data if r.handle_data.is_set else None for r in result_protos\n ]\n\n return {\n \"shapes\": 
result,\n \"handle_data\": result_handle_data,\n \"inputs_needed\": output[-1]\n }\n\n# pylint: disable=protected-access\nops._set_call_cpp_shape_fn(call_cpp_shape_fn)\n# pylint: enable=protected-access\n",
"# pylint: disable=g-bad-file-header\n# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"A wrapper of Session API which runs hooks.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport abc\n\nfrom tensorflow.core.protobuf import config_pb2\nfrom tensorflow.python.framework import errors\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import control_flow_ops\nfrom tensorflow.python.ops import lookup_ops\nfrom tensorflow.python.ops import resources\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.summary import summary\nfrom tensorflow.python.training import basic_session_run_hooks\nfrom tensorflow.python.training import coordinator\nfrom tensorflow.python.training import queue_runner\nfrom tensorflow.python.training import saver as training_saver\nfrom tensorflow.python.training import session_manager as sm\nfrom tensorflow.python.training import session_run_hook\n\n\n# The list of exceptions that we should recover from. 
Exceptions not in this\n# list may terminate the job.\n_PREEMPTION_ERRORS = (errors.AbortedError, errors.UnavailableError)\n\n\n# TODO(touts): Share that with the Supervisor.\nclass Scaffold(object):\n \"\"\"Structure to create or gather pieces commonly needed to train a model.\n\n When you build a model for training you usually need ops to initialize\n variables, a `Saver` to checkpoint them, an op to collect summaries for\n the visualizer, and so on.\n\n Various libraries built on top of the core TensorFlow library take care of\n creating some or all of these pieces and storing them in well known\n collections in the graph. The `Scaffold` class helps pick these pieces from\n the graph collections, creating and adding them to the collections if needed.\n\n If you call the scaffold constructor without any arguments, it will pick\n pieces from the collections, creating default ones if needed when\n `scaffold.finalize()` is called. You can pass arguments to the constructor to\n provide your own pieces. Pieces that you pass to the constructor are not\n added to the graph collections.\n\n The following pieces are directly accessible as attributes of the `Scaffold`\n object:\n\n * `saver`: A `tf.train.Saver` object taking care of saving the variables.\n Picked from and stored into the `SAVERS` collection in the graph by default.\n * `init_op`: An op to run to initialize the variables. Picked from and\n stored into the `INIT_OP` collection in the graph by default.\n * `ready_op`: An op to verify that the variables are initialized. Picked\n from and stored into the `READY_OP` collection in the graph by default.\n * `ready_for_local_init_op`: An op to verify that global state has been\n initialized and it is alright to run `local_init_op`. Picked from and\n stored into the `READY_FOR_LOCAL_INIT_OP` collection in the graph by\n default. 
This is needed when the initialization of local variables depends\n on the values of global variables.\n * `local_init_op`: An op to initialize the local variables. Picked\n from and stored into the `LOCAL_INIT_OP` collection in the graph by default.\n * `summary_op`: An op to run and merge the summaries in the graph. Picked\n from and stored into the `SUMMARY_OP` collection in the graph by default.\n * `global_step`: A tensor containing the global step counter. Picked\n from and stored into the `GLOBAL_STEP` collection in the graph by default.\n\n You can also pass the following additional pieces to the constructor:\n\n * `init_feed_dict`: A session feed dictionary that should be used when\n running the init op.\n * `init_fn`: A callable to run run after the init op to perform additional\n initializations. The callable will be called as\n `init_fn(scaffold, session)`.\n\n \"\"\"\n\n def __init__(self,\n init_op=None,\n init_feed_dict=None,\n init_fn=None,\n ready_op=None,\n ready_for_local_init_op=None,\n local_init_op=None,\n summary_op=None,\n saver=None,\n copy_from_scaffold=None):\n \"\"\"Create a scaffold.\n\n Args:\n init_op: Optional op for initializing variables.\n init_feed_dict: Optional session feed dictionary to use when running the\n init_op.\n init_fn: Optional function to use to initialize the model after running\n the init_op. Will be called as `init_fn(scaffold, session)`.\n ready_op: Optional op to verify that the variables are initialized. Must\n return an empty 1D string tensor when the variables are initialized, or\n a non-empty 1D string tensor listing the names of the non-initialized\n variables.\n ready_for_local_init_op: Optional op to verify that the global variables\n are initialized and `local_init_op` can be run. 
Must return an empty\n 1D string tensor when the global variables are initialized, or a\n non-empty 1D string tensor listing the names of the non-initialized\n global variables.\n local_init_op: Optional op to initialize local variables.\n summary_op: Optional op to gather all summaries. Must return a scalar\n string tensor containing a serialized `Summary` proto.\n saver: Optional `tf.train.Saver` object to use to save and restore\n variables.\n copy_from_scaffold: Optional scaffold object to copy fields from. Its\n fields will be overwritten by the provided fields in this function.\n \"\"\"\n if copy_from_scaffold is not None:\n if not isinstance(copy_from_scaffold, Scaffold):\n raise TypeError('copy_from_scaffold is not a Scaffold instance.')\n # We need _coalesce since Tensor is not converted to bool automatically,\n # so the common idiom of (a or b) does not work.\n coalesce = lambda a, b: a if a is not None else b\n init_op = coalesce(init_op, copy_from_scaffold.init_op)\n init_feed_dict = coalesce(init_feed_dict,\n copy_from_scaffold.init_feed_dict)\n # Use the original init_fn provided by the user to init the new Scaffold.\n init_fn = coalesce(init_fn, copy_from_scaffold._user_init_fn) # pylint: disable=protected-access\n ready_op = coalesce(ready_op, copy_from_scaffold.ready_op)\n ready_for_local_init_op = coalesce(\n ready_for_local_init_op, copy_from_scaffold.ready_for_local_init_op)\n local_init_op = coalesce(local_init_op, copy_from_scaffold.local_init_op)\n summary_op = coalesce(summary_op, copy_from_scaffold.summary_op)\n saver = coalesce(saver, copy_from_scaffold.saver)\n\n # NOTE(touts): modifying the init function to be passed the scaffold is a\n # hack to make it easy to find the saver. 
Is there a better way?\n self._user_init_fn = init_fn\n if init_fn:\n self._init_fn = lambda sess: init_fn(self, sess)\n else:\n self._init_fn = None\n\n self._init_op = init_op\n self._init_feed_dict = init_feed_dict\n self._ready_op = ready_op\n self._ready_for_local_init_op = ready_for_local_init_op\n self._local_init_op = local_init_op\n self._summary_op = summary_op\n self._saver = saver\n\n def finalize(self):\n \"\"\"Creates operations if needed and finalizes the graph.\"\"\"\n if self._init_op is None:\n def default_init_op():\n return control_flow_ops.group(\n variables.global_variables_initializer(),\n resources.initialize_resources(resources.shared_resources()))\n self._init_op = Scaffold.get_or_default(\n 'init_op',\n ops.GraphKeys.INIT_OP,\n default_init_op)\n if self._ready_op is None:\n def default_ready_op():\n return array_ops.concat([\n variables.report_uninitialized_variables(),\n resources.report_uninitialized_resources()\n ], 0)\n self._ready_op = Scaffold.get_or_default(\n 'ready_op', ops.GraphKeys.READY_OP,\n default_ready_op)\n if self._ready_for_local_init_op is None:\n def default_ready_for_local_init_op():\n return variables.report_uninitialized_variables(\n variables.global_variables())\n self._ready_for_local_init_op = Scaffold.get_or_default(\n 'ready_for_local_init_op', ops.GraphKeys.READY_FOR_LOCAL_INIT_OP,\n default_ready_for_local_init_op)\n if self._local_init_op is None:\n self._local_init_op = Scaffold.get_or_default(\n 'local_init_op', ops.GraphKeys.LOCAL_INIT_OP,\n Scaffold._default_local_init_op)\n if self._summary_op is None:\n self._summary_op = Scaffold.get_or_default('summary_op',\n ops.GraphKeys.SUMMARY_OP,\n summary.merge_all)\n # pylint: disable=g-long-lambda\n if self._saver is None:\n self._saver = training_saver._get_saver_or_default() # pylint: disable=protected-access\n # pylint: enable=g-long-lambda\n self._saver.build()\n\n ops.get_default_graph().finalize()\n return self\n\n @property\n def init_fn(self):\n 
return self._init_fn\n\n @property\n def init_op(self):\n return self._init_op\n\n @property\n def ready_op(self):\n return self._ready_op\n\n @property\n def ready_for_local_init_op(self):\n return self._ready_for_local_init_op\n\n @property\n def local_init_op(self):\n return self._local_init_op\n\n @property\n def summary_op(self):\n return self._summary_op\n\n @property\n def saver(self):\n return self._saver\n\n @property\n def init_feed_dict(self):\n return self._init_feed_dict\n\n @staticmethod\n def get_or_default(arg_name, collection_key, default_constructor):\n \"\"\"Get from cache or create a default operation.\"\"\"\n elements = ops.get_collection(collection_key)\n if elements:\n if len(elements) > 1:\n raise RuntimeError('More than one item in the collection \"%s\". '\n 'Please indicate which one to use by passing it to '\n 'the tf.Scaffold constructor as: '\n 'tf.Scaffold(%s=item to use)', collection_key,\n arg_name)\n return elements[0]\n op = default_constructor()\n if op is not None:\n ops.add_to_collection(collection_key, op)\n return op\n\n @staticmethod\n def _default_local_init_op():\n return control_flow_ops.group(variables.local_variables_initializer(),\n lookup_ops.tables_initializer())\n\n\ndef MonitoredTrainingSession(master='', # pylint: disable=invalid-name\n is_chief=True,\n checkpoint_dir=None,\n scaffold=None,\n hooks=None,\n chief_only_hooks=None,\n save_checkpoint_secs=600,\n save_summaries_steps=100,\n save_summaries_secs=None,\n config=None,\n stop_grace_period_secs=120,\n log_step_count_steps=100):\n \"\"\"Creates a `MonitoredSession` for training.\n\n For a chief, this utility sets proper session initializer/restorer. It also\n creates hooks related to checkpoint and summary saving. For workers, this\n utility sets proper session creator which waits for the chief to\n initialize/restore. 
Please check `tf.train.MonitoredSession` for more\n information.\n\n\n Args:\n master: `String` the TensorFlow master to use.\n is_chief: If `True`, it will take care of initialization and recovery the\n underlying TensorFlow session. If `False`, it will wait on a chief to\n initialize or recover the TensorFlow session.\n checkpoint_dir: A string. Optional path to a directory where to restore\n variables.\n scaffold: A `Scaffold` used for gathering or building supportive ops. If\n not specified, a default one is created. It's used to finalize the graph.\n hooks: Optional list of `SessionRunHook` objects.\n chief_only_hooks: list of `SessionRunHook` objects. Activate these hooks if\n `is_chief==True`, ignore otherwise.\n save_checkpoint_secs: The frequency, in seconds, that a checkpoint is saved\n using a default checkpoint saver. If `save_checkpoint_secs` is set to\n `None`, then the default checkpoint saver isn't used.\n save_summaries_steps: The frequency, in number of global steps, that the\n summaries are written to disk using a default summary saver. If both\n `save_summaries_steps` and `save_summaries_secs` are set to `None`, then\n the default summary saver isn't used.\n save_summaries_secs: The frequency, in secs, that the summaries are written\n to disk using a default summary saver. 
If both `save_summaries_steps` and\n `save_summaries_secs` are set to `None`, then the default summary saver\n isn't used.\n config: an instance of `tf.ConfigProto` proto used to configure the session.\n It's the `config` argument of constructor of `tf.Session`.\n stop_grace_period_secs: Number of seconds given to threads to stop after\n `close()` has been called.\n log_step_count_steps: The frequency, in number of global steps, that the\n global step/sec is logged.\n\n Returns:\n A `MonitoredSession` object.\n \"\"\"\n scaffold = scaffold or Scaffold()\n if not is_chief:\n session_creator = WorkerSessionCreator(\n scaffold=scaffold, master=master, config=config)\n return MonitoredSession(session_creator=session_creator, hooks=hooks or [],\n stop_grace_period_secs=stop_grace_period_secs)\n\n all_hooks = []\n if chief_only_hooks:\n all_hooks.extend(chief_only_hooks)\n session_creator = ChiefSessionCreator(\n scaffold=scaffold,\n checkpoint_dir=checkpoint_dir,\n master=master,\n config=config)\n\n if checkpoint_dir:\n all_hooks.append(basic_session_run_hooks.StepCounterHook(\n output_dir=checkpoint_dir, every_n_steps=log_step_count_steps))\n\n if (save_summaries_steps and save_summaries_steps > 0) or (\n save_summaries_secs and save_summaries_secs > 0):\n all_hooks.append(basic_session_run_hooks.SummarySaverHook(\n scaffold=scaffold,\n save_steps=save_summaries_steps,\n save_secs=save_summaries_secs,\n output_dir=checkpoint_dir))\n if save_checkpoint_secs and save_checkpoint_secs > 0:\n all_hooks.append(basic_session_run_hooks.CheckpointSaverHook(\n checkpoint_dir, save_secs=save_checkpoint_secs, scaffold=scaffold))\n\n if hooks:\n all_hooks.extend(hooks)\n return MonitoredSession(session_creator=session_creator, hooks=all_hooks,\n stop_grace_period_secs=stop_grace_period_secs)\n\n\nclass SessionCreator(object):\n \"\"\"A factory for tf.Session.\"\"\"\n\n @abc.abstractmethod\n def create_session(self):\n raise NotImplementedError(\n 'create_session is not implemented 
for {}.'.format(self))\n\n\nclass ChiefSessionCreator(SessionCreator):\n \"\"\"Creates a tf.Session for a chief.\"\"\"\n\n def __init__(self,\n scaffold=None,\n master='',\n config=None,\n checkpoint_dir=None,\n checkpoint_filename_with_path=None):\n \"\"\"Initializes a chief session creator.\n\n Args:\n scaffold: A `Scaffold` used for gathering or building supportive ops. If\n not specified a default one is created. It's used to finalize the graph.\n master: `String` representation of the TensorFlow master to use.\n config: `ConfigProto` proto used to configure the session.\n checkpoint_dir: A string. Optional path to a directory where to restore\n variables.\n checkpoint_filename_with_path: Full file name path to the checkpoint file.\n \"\"\"\n self._checkpoint_dir = checkpoint_dir\n self._checkpoint_filename_with_path = checkpoint_filename_with_path\n self._scaffold = scaffold or Scaffold()\n self._session_manager = None\n self._master = master\n self._config = config\n\n def _get_session_manager(self):\n if self._session_manager:\n return self._session_manager\n\n self._session_manager = sm.SessionManager(\n local_init_op=self._scaffold.local_init_op,\n ready_op=self._scaffold.ready_op,\n ready_for_local_init_op=self._scaffold.ready_for_local_init_op,\n graph=ops.get_default_graph())\n return self._session_manager\n\n def create_session(self):\n self._scaffold.finalize()\n return self._get_session_manager().prepare_session(\n self._master,\n saver=self._scaffold.saver,\n checkpoint_dir=self._checkpoint_dir,\n checkpoint_filename_with_path=self._checkpoint_filename_with_path,\n config=self._config,\n init_op=self._scaffold.init_op,\n init_feed_dict=self._scaffold.init_feed_dict,\n init_fn=self._scaffold.init_fn)\n\n\nclass WorkerSessionCreator(SessionCreator):\n \"\"\"Creates a tf.Session for a worker.\"\"\"\n\n def __init__(self, scaffold=None, master='', config=None):\n \"\"\"Initializes a worker session creator.\n\n Args:\n scaffold: A `Scaffold` used for 
gathering or building supportive ops. If\n not specified a default one is created. It's used to finalize the graph.\n master: `String` representation of the TensorFlow master to use.\n config: `ConfigProto` proto used to configure the session.\n \"\"\"\n self._scaffold = scaffold or Scaffold()\n self._session_manager = None\n self._master = master\n self._config = config\n\n def _get_session_manager(self):\n if self._session_manager:\n return self._session_manager\n\n self._session_manager = sm.SessionManager(\n local_init_op=self._scaffold.local_init_op,\n ready_op=self._scaffold.ready_op,\n ready_for_local_init_op=self._scaffold.ready_for_local_init_op,\n graph=ops.get_default_graph())\n return self._session_manager\n\n def create_session(self):\n self._scaffold.finalize()\n return self._get_session_manager().wait_for_session(\n self._master, config=self._config,\n max_wait_secs=30 * 60 # Wait up to 30 mins for the session to be ready.\n )\n\n\nclass _MonitoredSession(object):\n \"\"\"See `MonitoredSession` or `SingularMonitoredSession`.\"\"\"\n\n def __init__(self, session_creator, hooks, should_recover,\n stop_grace_period_secs=120):\n \"\"\"Sets up a Monitored or Hooked Session.\n\n Args:\n session_creator: A factory object to create session. Typically a\n `ChiefSessionCreator` or a `WorkerSessionCreator`.\n hooks: An iterable of `SessionRunHook' objects.\n should_recover: A bool. 
Indicates whether to recover from `AbortedError`\n and `UnavailableError` or not.\n stop_grace_period_secs: Number of seconds given to threads to stop after\n `close()` has been called.\n \"\"\"\n self._graph_was_finalized = ops.get_default_graph().finalized\n self._hooks = hooks or []\n for h in self._hooks:\n h.begin()\n # Create the session.\n self._coordinated_creator = self._CoordinatedSessionCreator(\n session_creator=session_creator or ChiefSessionCreator(),\n hooks=self._hooks,\n stop_grace_period_secs=stop_grace_period_secs)\n if should_recover:\n self._sess = _RecoverableSession(self._coordinated_creator)\n else:\n self._sess = self._coordinated_creator.create_session()\n\n @property\n def graph(self):\n \"\"\"The graph that was launched in this session.\"\"\"\n if self._tf_sess() is None:\n return None\n return self._tf_sess().graph\n\n def run(self, fetches, feed_dict=None, options=None, run_metadata=None):\n \"\"\"Run ops in the monitored session.\n\n This method is completely compatible with the `tf.Session.run()` method.\n\n Args:\n fetches: Same as `tf.Session.run()`.\n feed_dict: Same as `tf.Session.run()`.\n options: Same as `tf.Session.run()`.\n run_metadata: Same as `tf.Session.run()`.\n\n Returns:\n Same as `tf.Session.run()`.\n \"\"\"\n return self._sess.run(fetches,\n feed_dict=feed_dict,\n options=options,\n run_metadata=run_metadata)\n\n def should_stop(self):\n if self._sess:\n return self._sess.should_stop()\n return True\n\n def close(self):\n self._close_internal()\n\n def __enter__(self):\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n if exception_type in [errors.OutOfRangeError, StopIteration]:\n exception_type = None\n self._close_internal(exception_type)\n # __exit__ should return True to suppress an exception.\n return exception_type is None\n\n class _CoordinatedSessionCreator(object):\n \"\"\"Factory for the _RecoverableSession.\"\"\"\n\n def __init__(self, session_creator, hooks, 
stop_grace_period_secs):\n self._session_creator = session_creator\n self._hooks = hooks\n self.coord = None\n self.tf_sess = None\n self._stop_grace_period_secs = stop_grace_period_secs\n\n def create_session(self):\n \"\"\"Creates a coordinated session.\"\"\"\n # Keep the tf_sess for unit testing.\n self.tf_sess = self._session_creator.create_session()\n # We don't want coordinator to suppress any exception.\n self.coord = coordinator.Coordinator(clean_stop_exception_types=[])\n queue_runner.start_queue_runners(sess=self.tf_sess, coord=self.coord)\n # Inform the hooks that a new session has been created.\n for hook in self._hooks:\n hook.after_create_session(self.tf_sess, self.coord)\n return _CoordinatedSession(\n _HookedSession(self.tf_sess, self._hooks), self.coord,\n self._stop_grace_period_secs)\n\n def _close_internal(self, exception_type=None):\n try:\n if not exception_type:\n for h in self._hooks:\n h.end(self._coordinated_creator.tf_sess)\n finally:\n try:\n self._sess.close()\n finally:\n self._sess = None\n self._coordinated_creator.tf_sess = None\n self._coordinated_creator.coord = None\n if not self._graph_was_finalized:\n ops.get_default_graph()._unsafe_unfinalize() # pylint: disable=protected-access\n\n def _is_closed(self):\n \"\"\"Return True if the supervised session is closed. 
For tests only.\n\n Returns:\n A boolean.\n \"\"\"\n return self._coordinated_creator.tf_sess is None\n\n def _tf_sess(self):\n return self._coordinated_creator.tf_sess\n\n\nclass MonitoredSession(_MonitoredSession):\n \"\"\"Session-like object that handles initialization, recovery and hooks.\n\n Example usage:\n\n ```python\n saver_hook = CheckpointSaverHook(...)\n summary_hook = SummarySaverHook(...)\n with MonitoredSession(session_creator=ChiefSessionCreator(...),\n hooks=[saver_hook, summary_hook]) as sess:\n while not sess.should_stop():\n sess.run(train_op)\n ```\n\n Initialization: At creation time the monitored session does following things\n in given order:\n\n * calls `hook.begin()` for each given hook\n * finalizes the graph via `scaffold.finalize()`\n * create session\n * initializes the model via initialization ops provided by `Scaffold`\n * restores variables if a checkpoint exists\n * launches queue runners\n * calls `hook.after_create_session()`\n\n Run: When `run()` is called, the monitored session does following things:\n\n * calls `hook.before_run()`\n * calls TensorFlow `session.run()` with merged fetches and feed_dict\n * calls `hook.after_run()`\n * returns result of `session.run()` asked by user\n * if `AbortedError` or `UnavailableError` occurs, it recovers or\n reinitializes the session before executing the run() call again\n\n\n Exit: At the `close()`, the monitored session does following things in order:\n\n * calls `hook.end()`\n * closes the queue runners and the session\n * suppresses `OutOfRange` error which indicates that all inputs have been\n processed if the monitored_session is used as a context\n\n How to set `tf.Session` arguments:\n\n * In most cases you can set session arguments as follows:\n\n ```python\n MonitoredSession(\n session_creator=ChiefSessionCreator(master=..., config=...))\n ```\n\n * In distributed setting for a non-chief worker, you can use following:\n\n ```python\n MonitoredSession(\n 
session_creator=WorkerSessionCreator(master=..., config=...))\n ```\n\n See `MonitoredTrainingSession` for an example usage based on chief or worker.\n\n Note: This is not a `tf.Session`. For example, it cannot do following:\n\n * it cannot be set as default session.\n * it cannot be sent to saver.save.\n * it cannot be sent to tf.train.start_queue_runners.\n\n Args:\n session_creator: A factory object to create session. Typically a\n `ChiefSessionCreator` which is the default one.\n hooks: An iterable of `SessionRunHook' objects.\n\n Returns:\n A MonitoredSession object.\n \"\"\"\n\n def __init__(self, session_creator=None, hooks=None,\n stop_grace_period_secs=120):\n super(MonitoredSession, self).__init__(\n session_creator, hooks, should_recover=True,\n stop_grace_period_secs=stop_grace_period_secs)\n\n\nclass SingularMonitoredSession(_MonitoredSession):\n \"\"\"Session-like object that handles initialization, restoring, and hooks.\n\n Please note that this utility is not recommended for distributed settings.\n For distributed settings, please use `tf.train.MonitoredSession`. The\n differences between `MonitoredSession` and `SingularMonitoredSession` are:\n\n * `MonitoredSession` handles `AbortedError` and `UnavailableError` for\n distributed settings, but `SingularMonitoredSession` does not.\n * `MonitoredSession` can be created in `chief` or `worker` modes.\n `SingularMonitoredSession` is always created as `chief`.\n * You can access the raw `tf.Session` object used by\n `SingularMonitoredSession`, whereas in MonitoredSession the raw session is\n private. 
This can be used:\n - To `run` without hooks.\n - To save and restore.\n * All other functionality is identical.\n\n Example usage:\n ```python\n saver_hook = CheckpointSaverHook(...)\n summary_hook = SummarySaverHook(...)\n with SingularMonitoredSession(hooks=[saver_hook, summary_hook]) as sess:\n while not sess.should_stop():\n sess.run(train_op)\n ```\n\n Initialization: At creation time the hooked session does following things\n in given order:\n\n * calls `hook.begin()` for each given hook\n * finalizes the graph via `scaffold.finalize()`\n * create session\n * initializes the model via initialization ops provided by `Scaffold`\n * restores variables if a checkpoint exists\n * launches queue runners\n\n Run: When `run()` is called, the hooked session does following things:\n\n * calls `hook.before_run()`\n * calls TensorFlow `session.run()` with merged fetches and feed_dict\n * calls `hook.after_run()`\n * returns result of `session.run()` asked by user\n\n Exit: At the `close()`, the hooked session does following things in order:\n\n * calls `hook.end()`\n * closes the queue runners and the session\n * suppresses `OutOfRange` error which indicates that all inputs have been\n processed if the `SingularMonitoredSession` is used as a context.\n \"\"\"\n\n def __init__(self,\n hooks=None,\n scaffold=None,\n master='',\n config=None,\n checkpoint_dir=None,\n stop_grace_period_secs=120):\n \"\"\"Creates a SingularMonitoredSession.\n\n Args:\n hooks: An iterable of `SessionRunHook' objects.\n scaffold: A `Scaffold` used for gathering or building supportive ops. If\n not specified a default one is created. It's used to finalize the graph.\n master: `String` representation of the TensorFlow master to use.\n config: `ConfigProto` proto used to configure the session.\n checkpoint_dir: A string. 
Optional path to a directory where to restore\n variables.\n stop_grace_period_secs: Number of seconds given to threads to stop after\n `close()` has been called.\n \"\"\"\n session_creator = ChiefSessionCreator(\n scaffold=scaffold,\n master=master,\n config=config,\n checkpoint_dir=checkpoint_dir)\n super(SingularMonitoredSession, self).__init__(\n session_creator, hooks, should_recover=False,\n stop_grace_period_secs=stop_grace_period_secs)\n\n def raw_session(self):\n \"\"\"Returns underlying `TensorFlow.Session` object.\"\"\"\n return self._tf_sess()\n\n\nclass _WrappedSession(object):\n \"\"\"Wrapper around a `tf.Session`.\n\n This wrapper is used as a base class for various session wrappers\n that provide additional functionality such as monitoring, coordination,\n and recovery.\n\n In addition to the methods exported by `SessionInterface` the wrapper\n provides a method to check for stop and never raises exceptions from\n calls to `close()`.\n \"\"\"\n\n def __init__(self, sess):\n \"\"\"Creates a `_WrappedSession`.\n\n Args:\n sess: A `tf.Session` or `_WrappedSession` object. 
The wrapped session.\n \"\"\"\n self._sess = sess\n self._wrapped_is_stoppable = isinstance(self._sess, _WrappedSession)\n\n @property\n def graph(self):\n return self._sess.graph\n\n @property\n def sess_str(self):\n return self._sess.sess_str\n\n def should_stop(self):\n \"\"\"Return true if this session should not be used anymore.\n\n Always return True if the session was closed.\n\n Returns:\n True if the session should stop, False otherwise.\n \"\"\"\n if self._check_stop():\n return True\n if self._sess:\n return self._wrapped_is_stoppable and self._sess.should_stop()\n return True\n\n def _check_stop(self):\n \"\"\"Hook for subclasses to provide their own stop condition.\n\n Returns:\n True if the session should stop, False otherwise.\n \"\"\"\n return False\n\n def close(self):\n if self._sess:\n try:\n self._sess.close()\n except _PREEMPTION_ERRORS:\n pass\n finally:\n self._sess = None\n\n def run(self, *args, **kwargs):\n return self._sess.run(*args, **kwargs)\n\n\nclass _RecoverableSession(_WrappedSession):\n \"\"\"A wrapped session that recreates a session upon certain kinds of errors.\n\n The constructor is passed a SessionCreator object, not a session.\n\n Calls to `run()` are delegated to the wrapped session. 
If a call raises the\n exception `tf.errors.AbortedError` or `tf.errors.UnavailableError`, the\n wrapped session is closed, and a new one is created by calling the factory\n again.\n \"\"\"\n\n def __init__(self, sess_creator):\n \"\"\"Create a new `_RecoverableSession`.\n\n The value returned by calling `sess_creator.create_session()` will be the\n session wrapped by this recoverable session.\n\n Args:\n sess_creator: A 'SessionCreator' to be wrapped by recoverable.\n \"\"\"\n self._sess_creator = sess_creator\n _WrappedSession.__init__(self, self._create_session())\n\n def _create_session(self):\n while True:\n try:\n return self._sess_creator.create_session()\n except _PREEMPTION_ERRORS as e:\n logging.info('An error was raised while a session was being created. '\n 'This may be due to a preemption of a connected worker '\n 'or parameter server. A new session will be created. '\n 'Error: %s', e)\n\n def run(self, fetches, feed_dict=None, options=None, run_metadata=None):\n while True:\n try:\n if not self._sess:\n self._sess = self._create_session()\n return self._sess.run(fetches,\n feed_dict=feed_dict,\n options=options,\n run_metadata=run_metadata)\n except _PREEMPTION_ERRORS as e:\n logging.info('An error was raised. This may be due to a preemption in '\n 'a connected worker or parameter server. The current '\n 'session will be closed and a new session will be '\n 'created. Error: %s', e)\n self.close()\n self._sess = None\n\n\nclass _CoordinatedSession(_WrappedSession):\n \"\"\"A wrapped session that works with a `tf.Coordinator`.\n\n Calls to `run()` are delegated to the wrapped session. If a call\n raises an exception, the exception is reported to the coordinator.\n\n In addition, after each call to `run()` this session ask the coordinator if\n the session should stop. 
In that case it will will join all the threads\n registered with the coordinator before returning.\n\n If the coordinator was requested to stop with an exception, that exception\n will be re-raised from the call to `run()`.\n \"\"\"\n\n def __init__(self, sess, coord, stop_grace_period_secs=120):\n \"\"\"Create a new `_CoordinatedSession`.\n\n Args:\n sess: A `tf.Session` object. The wrapped session.\n coord: A `tf.train.Coordinator` object.\n stop_grace_period_secs: Number of seconds given to threads to stop after\n `close()` has been called.\n \"\"\"\n _WrappedSession.__init__(self, sess)\n self._coord = coord\n self._stop_grace_period_secs = stop_grace_period_secs\n\n def _check_stop(self):\n # Check with the coordinator if we should stop.\n return self._coord.should_stop()\n\n def close(self):\n self._coord.request_stop()\n try:\n self._coord.join(\n stop_grace_period_secs=self._stop_grace_period_secs,\n ignore_live_threads=True)\n finally:\n try:\n _WrappedSession.close(self)\n except Exception: # pylint: disable=broad-except\n # We intentionally suppress exceptions from the close() here since\n # useful exceptions are already reported by join().\n pass\n\n\nclass _HookedSession(_WrappedSession):\n \"\"\"A _WrappedSession that calls hooks during calls to run().\n\n The list of hooks to call is passed in the constructor. Before each call\n to `run()` the session calls the `before_run()` method of the hooks, which\n can return additional ops or tensors to run. 
These are added to the arguments\n of the call to `run()`.\n\n When the `run()` call finishes, the session calls the `after_run()` methods of\n the hooks, passing the values returned by the `run()` call corresponding to\n the ops and tensors that each hook requested.\n\n If any call to the hooks, requests stop via run_context the session will be\n marked as needing to stop and its `should_stop()` method will now return\n `True`.\n \"\"\"\n\n def __init__(self, sess, hooks):\n \"\"\"Initializes a _HookedSession object.\n\n Args:\n sess: A `tf.Session` or a `_WrappedSession` object.\n hooks: An iterable of `SessionRunHook' objects.\n \"\"\"\n\n _WrappedSession.__init__(self, sess)\n self._hooks = hooks\n self._should_stop = False\n\n def _check_stop(self):\n \"\"\"See base class.\"\"\"\n return self._should_stop\n\n def run(self, fetches, feed_dict=None, options=None, run_metadata=None):\n \"\"\"See base class.\"\"\"\n if self.should_stop():\n raise RuntimeError('Run called even after should_stop requested.')\n\n actual_fetches = {'caller': fetches}\n\n run_context = session_run_hook.SessionRunContext(\n original_args=session_run_hook.SessionRunArgs(fetches, feed_dict),\n session=self._sess)\n\n options = options or config_pb2.RunOptions()\n feed_dict = self._call_hook_before_run(run_context, actual_fetches,\n feed_dict, options)\n\n # Do session run.\n run_metadata = run_metadata or config_pb2.RunMetadata()\n outputs = _WrappedSession.run(self,\n fetches=actual_fetches,\n feed_dict=feed_dict,\n options=options,\n run_metadata=run_metadata)\n\n for hook in self._hooks:\n hook.after_run(\n run_context,\n session_run_hook.SessionRunValues(\n results=outputs[hook] if hook in outputs else None,\n options=options,\n run_metadata=run_metadata))\n self._should_stop = self._should_stop or run_context.stop_requested\n\n return outputs['caller']\n\n def _call_hook_before_run(self, run_context, fetch_dict, user_feed_dict,\n options):\n \"\"\"Calls hooks.before_run and handles 
requests from hooks.\"\"\"\n hook_feeds = {}\n for hook in self._hooks:\n request = hook.before_run(run_context)\n if request is not None:\n if request.fetches is not None:\n fetch_dict[hook] = request.fetches\n if request.feed_dict:\n self._raise_if_feeds_intersects(\n hook_feeds, request.feed_dict,\n 'Same tensor is fed by two hooks.')\n hook_feeds.update(request.feed_dict)\n if request.options:\n self._merge_run_options(options, request.options)\n\n if not hook_feeds:\n return user_feed_dict\n\n if not user_feed_dict:\n return hook_feeds\n\n self._raise_if_feeds_intersects(\n user_feed_dict, hook_feeds,\n 'Same tensor is fed by a SessionRunHook and user.')\n hook_feeds.update(user_feed_dict)\n return hook_feeds\n\n def _raise_if_feeds_intersects(self, feeds1, feeds2, message):\n intersection = set(feeds1.keys()) & set(feeds2.keys())\n if intersection:\n raise RuntimeError(message + ' Conflict(s): ' + str(list(intersection)))\n\n def _merge_run_options(self, options, incoming_options):\n \"\"\"Merge two instances of RunOptions into the first one.\n\n During the merger, the numerical fields including trace_level,\n timeout_in_ms, inter_op_thread_pool are set to the larger one of the two.\n The boolean value is set to the logical OR of the two.\n debug_tensor_watch_opts of the original options is extended with that from\n the incoming one.\n\n Args:\n options: The options to merge into.\n incoming_options: The options to be merged into the first argument.\n \"\"\"\n options.trace_level = max(options.trace_level, incoming_options.trace_level)\n options.timeout_in_ms = max(options.timeout_in_ms,\n incoming_options.timeout_in_ms)\n options.inter_op_thread_pool = max(options.inter_op_thread_pool,\n incoming_options.inter_op_thread_pool)\n options.output_partition_graphs = max(\n options.output_partition_graphs,\n incoming_options.output_partition_graphs)\n\n options.debug_options.debug_tensor_watch_opts.extend(\n 
incoming_options.debug_options.debug_tensor_watch_opts)\n",
"# Copyright 2016 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Composes one or more `LinearOperators`.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom tensorflow.contrib.linalg.python.ops import linear_operator\nfrom tensorflow.python.framework import common_shapes\nfrom tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.framework import tensor_shape\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import check_ops\n\n__all__ = [\"LinearOperatorComposition\"]\n\n\nclass LinearOperatorComposition(linear_operator.LinearOperator):\n \"\"\"Composes one or more `LinearOperators`.\n\n This operator composes one or more linear operators `[op1,...,opJ]`,\n building a new `LinearOperator` with action defined by:\n\n ```\n op_composed(x) := op1(op2(...(opJ(x)...))\n ```\n\n If `opj` acts like [batch] matrix `Aj`, then `op_composed` acts like the\n [batch] matrix formed with the multiplication `A1 A2...AJ`.\n\n If `opj` has shape `batch_shape_j + [M_j, N_j]`, then we must have\n `N_j = M_{j+1}`, in which case the composed operator has shape equal to\n `broadcast_batch_shape + [M_1, N_J]`, where `broadcast_batch_shape` is the\n mutual broadcast of `batch_shape_j`, `j = 1,...,J`, 
assuming the intermediate\n batch shapes broadcast. Even if the composed shape is well defined, the\n composed operator's methods may fail due to lack of broadcasting ability in\n the defining operators' methods.\n\n ```python\n # Create a 2 x 2 linear operator composed of two 2 x 2 operators.\n operator_1 = LinearOperatorFullMatrix([[1., 2.], [3., 4.]])\n operator_2 = LinearOperatorFullMatrix([[1., 0.], [0., 1.]])\n operator = LinearOperatorComposition([operator_1, operator_2])\n\n operator.to_dense()\n ==> [[1., 2.]\n [3., 4.]]\n\n operator.shape\n ==> [2, 2]\n\n operator.log_abs_determinant()\n ==> scalar Tensor\n\n x = ... Shape [2, 4] Tensor\n operator.matmul(x)\n ==> Shape [2, 4] Tensor\n\n # Create a [2, 3] batch of 4 x 5 linear operators.\n matrix_45 = tf.random_normal(shape=[2, 3, 4, 5])\n operator_45 = LinearOperatorFullMatrix(matrix)\n\n # Create a [2, 3] batch of 5 x 6 linear operators.\n matrix_56 = tf.random_normal(shape=[2, 3, 5, 6])\n operator_56 = LinearOperatorFullMatrix(matrix_56)\n\n # Compose to create a [2, 3] batch of 4 x 6 operators.\n operator_46 = LinearOperatorComposition([operator_45, operator_56])\n\n # Create a shape [2, 3, 6, 2] vector.\n x = tf.random_normal(shape=[2, 3, 6, 2])\n operator.matmul(x)\n ==> Shape [2, 3, 4, 2] Tensor\n ```\n\n #### Performance\n\n The performance of `LinearOperatorComposition` on any operation is equal to\n the sum of the individual operators' operations.\n\n\n #### Matrix property hints\n\n This `LinearOperator` is initialized with boolean flags of the form `is_X`,\n for `X = non_singular, self_adjoint, positive_definite, square`.\n These have the following meaning\n * If `is_X == True`, callers should expect the operator to have the\n property `X`. This is a promise that should be fulfilled, but is *not* a\n runtime assert. 
For example, finite floating point precision may result\n in these promises being violated.\n * If `is_X == False`, callers should expect the operator to not have `X`.\n * If `is_X == None` (the default), callers should have no expectation either\n way.\n \"\"\"\n\n def __init__(self,\n operators,\n is_non_singular=None,\n is_self_adjoint=None,\n is_positive_definite=None,\n is_square=None,\n name=None):\n r\"\"\"Initialize a `LinearOperatorComposition`.\n\n `LinearOperatorComposition` is initialized with a list of operators\n `[op_1,...,op_J]`. For the `matmul` method to be well defined, the\n composition `op_i.matmul(op_{i+1}(x))` must be defined. Other methods have\n similar constraints.\n\n Args:\n operators: Iterable of `LinearOperator` objects, each with\n the same `dtype` and composable shape.\n is_non_singular: Expect that this operator is non-singular.\n is_self_adjoint: Expect that this operator is equal to its hermitian\n transpose.\n is_positive_definite: Expect that this operator is positive definite,\n meaning the quadratic form `x^H A x` has positive real part for all\n nonzero `x`. Note that we do not require the operator to be\n self-adjoint to be positive-definite. See:\n https://en.wikipedia.org/wiki/Positive-definite_matrix\\\n #Extension_for_non_symmetric_matrices\n is_square: Expect that this operator acts like square [batch] matrices.\n name: A name for this `LinearOperator`. Default is the individual\n operators names joined with `_o_`.\n\n Raises:\n TypeError: If all operators do not have the same `dtype`.\n ValueError: If `operators` is empty.\n \"\"\"\n # Validate operators.\n check_ops.assert_proper_iterable(operators)\n operators = list(operators)\n if not operators:\n raise ValueError(\n \"Expected a non-empty list of operators. 
Found: %s\" % operators)\n self._operators = operators\n\n # Validate dtype.\n dtype = operators[0].dtype\n for operator in operators:\n if operator.dtype != dtype:\n name_type = (str((o.name, o.dtype)) for o in operators)\n raise TypeError(\n \"Expected all operators to have the same dtype. Found %s\"\n % \" \".join(name_type))\n\n # Auto-set and check hints.\n if all(operator.is_non_singular for operator in operators):\n if is_non_singular is False:\n raise ValueError(\n \"The composition of non-singular operators is always non-singular.\")\n is_non_singular = True\n\n # Initialization.\n graph_parents = []\n for operator in operators:\n graph_parents.extend(operator.graph_parents)\n\n if name is None:\n name = \"_o_\".join(operator.name for operator in operators)\n with ops.name_scope(name, values=graph_parents):\n super(LinearOperatorComposition, self).__init__(\n dtype=dtype,\n graph_parents=graph_parents,\n is_non_singular=is_non_singular,\n is_self_adjoint=is_self_adjoint,\n is_positive_definite=is_positive_definite,\n is_square=is_square,\n name=name)\n\n @property\n def operators(self):\n return self._operators\n\n def _shape(self):\n # Get final matrix shape.\n domain_dimension = self.operators[0].domain_dimension\n for operator in self.operators[1:]:\n domain_dimension.assert_is_compatible_with(operator.range_dimension)\n domain_dimension = operator.domain_dimension\n\n matrix_shape = tensor_shape.TensorShape(\n [self.operators[0].range_dimension,\n self.operators[-1].domain_dimension])\n\n # Get broadcast batch shape.\n # broadcast_shape checks for compatibility.\n batch_shape = self.operators[0].batch_shape\n for operator in self.operators[1:]:\n batch_shape = common_shapes.broadcast_shape(\n batch_shape, operator.batch_shape)\n\n return batch_shape.concatenate(matrix_shape)\n\n def _shape_tensor(self):\n # Avoid messy broadcasting if possible.\n if self.shape.is_fully_defined():\n return ops.convert_to_tensor(\n self.shape.as_list(), 
dtype=dtypes.int32, name=\"shape\")\n\n # Don't check the matrix dimensions. That would add unnecessary Asserts to\n # the graph. Things will fail at runtime naturally if shapes are\n # incompatible.\n matrix_shape = array_ops.stack([\n self.operators[0].range_dimension_tensor(),\n self.operators[-1].domain_dimension_tensor()\n ])\n\n # Dummy Tensor of zeros. Will never be materialized.\n zeros = array_ops.zeros(shape=self.operators[0].batch_shape_tensor())\n for operator in self.operators[1:]:\n zeros += array_ops.zeros(shape=operator.batch_shape_tensor())\n batch_shape = array_ops.shape(zeros)\n\n return array_ops.concat((batch_shape, matrix_shape), 0)\n\n def _matmul(self, x, adjoint=False, adjoint_arg=False):\n # If self.operators = [A, B], and not adjoint, then\n # matmul_order_list = [B, A].\n # As a result, we return A.matmul(B.matmul(x))\n if adjoint:\n matmul_order_list = self.operators\n else:\n matmul_order_list = list(reversed(self.operators))\n\n result = matmul_order_list[0].matmul(\n x, adjoint=adjoint, adjoint_arg=adjoint_arg)\n for operator in matmul_order_list[1:]:\n result = operator.matmul(result, adjoint=adjoint)\n return result\n\n def _determinant(self):\n result = self.operators[0].determinant()\n for operator in self.operators[1:]:\n result *= operator.determinant()\n return result\n\n def _log_abs_determinant(self):\n result = self.operators[0].log_abs_determinant()\n for operator in self.operators[1:]:\n result += operator.log_abs_determinant()\n return result\n\n def _solve(self, rhs, adjoint=False, adjoint_arg=False):\n # TODO(langmore) Implement solve using solve_ls if some intermediate\n # operator maps to a high dimensional space.\n # In that case, an exact solve may still be possible.\n\n # If self.operators = [A, B], and not adjoint, then\n # solve_order_list = [A, B].\n # As a result, we return B.solve(A.solve(x))\n if adjoint:\n solve_order_list = list(reversed(self.operators))\n else:\n solve_order_list = self.operators\n\n 
solution = solve_order_list[0].solve(\n rhs, adjoint=adjoint, adjoint_arg=adjoint_arg)\n for operator in solve_order_list[1:]:\n solution = operator.solve(solution, adjoint=adjoint)\n return solution\n\n def _add_to_tensor(self, x):\n return self.to_dense() + x\n",
"# Copyright 2017 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\nr\"\"\"Convert checkpoints using RNNCells to new name convention.\n\nUsage:\n\n python checkpoint_convert.py [--write_v1_checkpoint] \\\n '/path/to/checkpoint' '/path/to/new_checkpoint'\n\nFor example, if there is a V2 checkpoint to be converted and the files include:\n /tmp/my_checkpoint/model.ckpt.data-00000-of-00001\n /tmp/my_checkpoint/model.ckpt.index\n /tmp/my_checkpoint/model.ckpt.meta\n\nuse the following command:\n mkdir /tmp/my_converted_checkpoint &&\n python checkpoint_convert.py \\\n /tmp/my_checkpoint/model.ckpt /tmp/my_converted_checkpoint/model.ckpt\n\nThis will generate three converted checkpoint files corresponding to the three\nold ones in the new directory:\n /tmp/my_converted_checkpoint/model.ckpt.data-00000-of-00001\n /tmp/my_converted_checkpoint/model.ckpt.index\n /tmp/my_converted_checkpoint/model.ckpt.meta\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport argparse\nimport collections\nimport re\nimport sys\n\nfrom tensorflow.core.protobuf import saver_pb2\nfrom tensorflow.python import pywrap_tensorflow\nfrom tensorflow.python.client import session\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import variables\nfrom tensorflow.python.platform 
import app\nfrom tensorflow.python.platform import tf_logging as logging\nfrom tensorflow.python.training import saver as saver_lib\n\n_RNN_NAME_REPLACEMENTS = collections.OrderedDict([\n ############################################################################\n # contrib/rnn/python/ops/core_rnn_cell_impl.py\n # BasicRNNCell\n ('basic_rnn_cell/weights', 'basic_rnn_cell/kernel'),\n ('basic_rnn_cell/biases', 'basic_rnn_cell/bias'),\n # GRUCell\n ('gru_cell/weights', 'gru_cell/kernel'),\n ('gru_cell/biases', 'gru_cell/bias'),\n ('gru_cell/gates/weights', 'gru_cell/gates/kernel'),\n ('gru_cell/gates/biases', 'gru_cell/gates/bias'),\n ('gru_cell/candidate/weights', 'gru_cell/candidate/kernel'),\n ('gru_cell/candidate/biases', 'gru_cell/candidate/bias'),\n # BasicLSTMCell\n ('basic_lstm_cell/weights', 'basic_lstm_cell/kernel'),\n ('basic_lstm_cell/biases', 'basic_lstm_cell/bias'),\n # LSTMCell\n ('lstm_cell/weights', 'lstm_cell/kernel'),\n ('lstm_cell/biases', 'lstm_cell/bias'),\n ('lstm_cell/projection/weights', 'lstm_cell/projection/kernel'),\n ('lstm_cell/projection/biases', 'lstm_cell/projection/bias'),\n # OutputProjectionWrapper\n ('output_projection_wrapper/weights', 'output_projection_wrapper/kernel'),\n ('output_projection_wrapper/biases', 'output_projection_wrapper/bias'),\n # InputProjectionWrapper\n ('input_projection_wrapper/weights', 'input_projection_wrapper/kernel'),\n ('input_projection_wrapper/biases', 'input_projection_wrapper/bias'),\n ############################################################################\n # contrib/rnn/python/ops/lstm_ops.py\n # LSTMBlockFusedCell ??\n ('lstm_block_wrapper/weights', 'lstm_block_wrapper/kernel'),\n ('lstm_block_wrapper/biases', 'lstm_block_wrapper/bias'),\n ############################################################################\n # contrib/rnn/python/ops/rnn_cell.py\n # LayerNormBasicLSTMCell\n ('layer_norm_basic_lstm_cell/weights', 'layer_norm_basic_lstm_cell/kernel'),\n 
('layer_norm_basic_lstm_cell/biases', 'layer_norm_basic_lstm_cell/bias'),\n # UGRNNCell, not found in g3, but still need it?\n ('ugrnn_cell/weights', 'ugrnn_cell/kernel'),\n ('ugrnn_cell/biases', 'ugrnn_cell/bias'),\n # NASCell\n ('nas_rnn/weights', 'nas_rnn/kernel'),\n ('nas_rnn/recurrent_weights', 'nas_rnn/recurrent_kernel'),\n # IntersectionRNNCell\n ('intersection_rnn_cell/weights', 'intersection_rnn_cell/kernel'),\n ('intersection_rnn_cell/biases', 'intersection_rnn_cell/bias'),\n ('intersection_rnn_cell/in_projection/weights',\n 'intersection_rnn_cell/in_projection/kernel'),\n ('intersection_rnn_cell/in_projection/biases',\n 'intersection_rnn_cell/in_projection/bias'),\n # PhasedLSTMCell\n ('phased_lstm_cell/mask_gates/weights',\n 'phased_lstm_cell/mask_gates/kernel'),\n ('phased_lstm_cell/mask_gates/biases', 'phased_lstm_cell/mask_gates/bias'),\n ('phased_lstm_cell/new_input/weights', 'phased_lstm_cell/new_input/kernel'),\n ('phased_lstm_cell/new_input/biases', 'phased_lstm_cell/new_input/bias'),\n ('phased_lstm_cell/output_gate/weights',\n 'phased_lstm_cell/output_gate/kernel'),\n ('phased_lstm_cell/output_gate/biases',\n 'phased_lstm_cell/output_gate/bias'),\n # AttentionCellWrapper\n ('attention_cell_wrapper/weights', 'attention_cell_wrapper/kernel'),\n ('attention_cell_wrapper/biases', 'attention_cell_wrapper/bias'),\n ('attention_cell_wrapper/attn_output_projection/weights',\n 'attention_cell_wrapper/attn_output_projection/kernel'),\n ('attention_cell_wrapper/attn_output_projection/biases',\n 'attention_cell_wrapper/attn_output_projection/bias'),\n ('attention_cell_wrapper/attention/weights',\n 'attention_cell_wrapper/attention/kernel'),\n ('attention_cell_wrapper/attention/biases',\n 'attention_cell_wrapper/attention/bias'),\n])\n\n_RNN_SHARDED_NAME_REPLACEMENTS = collections.OrderedDict([\n ('LSTMCell/W_', 'lstm_cell/weights/part_'),\n ('BasicLSTMCell/Linear/Matrix_', 'basic_lstm_cell/weights/part_'),\n ('GRUCell/W_', 'gru_cell/weights/part_'),\n 
('MultiRNNCell/Cell', 'multi_rnn_cell/cell_'),\n])\n\n\ndef _rnn_name_replacement(var_name):\n for pattern in _RNN_NAME_REPLACEMENTS:\n if pattern in var_name:\n old_var_name = var_name\n var_name = var_name.replace(pattern, _RNN_NAME_REPLACEMENTS[pattern])\n logging.info('Converted: %s --> %s' % (old_var_name, var_name))\n break\n return var_name\n\n\ndef _rnn_name_replacement_sharded(var_name):\n for pattern in _RNN_SHARDED_NAME_REPLACEMENTS:\n if pattern in var_name:\n old_var_name = var_name\n var_name = var_name.replace(pattern,\n _RNN_SHARDED_NAME_REPLACEMENTS[pattern])\n logging.info('Converted: %s --> %s' % (old_var_name, var_name))\n return var_name\n\n\ndef _split_sharded_vars(name_shape_map):\n \"\"\"Split shareded variables.\n\n Args:\n name_shape_map: A dict from variable name to variable shape.\n\n Returns:\n not_sharded: Names of the non-sharded variables.\n sharded: Names of the sharded variables.\n \"\"\"\n sharded = []\n not_sharded = []\n for name in name_shape_map:\n if re.match(name, '_[0-9]+$'):\n if re.sub('_[0-9]+$', '_1', name) in name_shape_map:\n sharded.append(name)\n else:\n not_sharded.append(name)\n else:\n not_sharded.append(name)\n return not_sharded, sharded\n\n\ndef convert_names(checkpoint_from_path,\n checkpoint_to_path,\n write_v1_checkpoint=False):\n \"\"\"Migrates the names of variables within a checkpoint.\n\n Args:\n checkpoint_from_path: Path to source checkpoint to be read in.\n checkpoint_to_path: Path to checkpoint to be written out.\n write_v1_checkpoint: Whether the output checkpoint will be in V1 format.\n\n Returns:\n A dictionary that maps the new variable names to the Variable objects.\n A dictionary that maps the old variable names to the new variable names.\n \"\"\"\n with ops.Graph().as_default():\n logging.info('Reading checkpoint_from_path %s' % checkpoint_from_path)\n reader = pywrap_tensorflow.NewCheckpointReader(checkpoint_from_path)\n name_shape_map = reader.get_variable_to_shape_map()\n not_sharded, 
sharded = _split_sharded_vars(name_shape_map)\n new_variable_map = {}\n conversion_map = {}\n for var_name in not_sharded:\n new_var_name = _rnn_name_replacement(var_name)\n tensor = reader.get_tensor(var_name)\n var = variables.Variable(tensor, name=var_name)\n new_variable_map[new_var_name] = var\n if new_var_name != var_name:\n conversion_map[var_name] = new_var_name\n for var_name in sharded:\n new_var_name = _rnn_name_replacement_sharded(var_name)\n var = variables.Variable(tensor, name=var_name)\n new_variable_map[new_var_name] = var\n if new_var_name != var_name:\n conversion_map[var_name] = new_var_name\n\n write_version = (saver_pb2.SaverDef.V1\n if write_v1_checkpoint else saver_pb2.SaverDef.V2)\n saver = saver_lib.Saver(new_variable_map, write_version=write_version)\n\n with session.Session() as sess:\n sess.run(variables.global_variables_initializer())\n logging.info('Writing checkpoint_to_path %s' % checkpoint_to_path)\n saver.save(sess, checkpoint_to_path)\n\n logging.info('Summary:')\n logging.info(' Converted %d variable name(s).' % len(new_variable_map))\n return new_variable_map, conversion_map\n\n\ndef main(_):\n convert_names(\n FLAGS.checkpoint_from_path,\n FLAGS.checkpoint_to_path,\n write_v1_checkpoint=FLAGS.write_v1_checkpoint)\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser()\n parser.register('type', 'bool', lambda v: v.lower() == 'true')\n parser.add_argument('checkpoint_from_path', type=str,\n help='Path to source checkpoint to be read in.')\n parser.add_argument('checkpoint_to_path', type=str,\n help='Path to checkpoint to be written out.')\n parser.add_argument('--write_v1_checkpoint', action='store_true',\n help='Write v1 checkpoint')\n FLAGS, unparsed = parser.parse_known_args()\n\n app.run(main=main, argv=[sys.argv[0]] + unparsed)\n"
] |
[
[
"tensorflow.clip_by_value",
"tensorflow.nn.xw_plus_b",
"tensorflow.nn.relu",
"tensorflow.train.Server",
"tensorflow.truncated_normal",
"tensorflow.Variable",
"tensorflow.zeros",
"tensorflow.train.ClusterSpec",
"tensorflow.placeholder",
"tensorflow.ConfigProto",
"tensorflow.global_variables_initializer",
"tensorflow.train.replica_device_setter",
"tensorflow.train.SyncReplicasOptimizer",
"tensorflow.train.Supervisor",
"tensorflow.train.AdamOptimizer",
"tensorflow.examples.tutorials.mnist.input_data.read_data_sets",
"tensorflow.app.run"
],
[
"tensorflow.python.framework.tensor_shape.scalar",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.framework.cpp_shape_inference_pb2.CppShapeInferenceResult",
"tensorflow.python.framework.tensor_util.constant_value_as_shape",
"numpy.asarray",
"tensorflow.python.framework.tensor_shape.Dimension",
"tensorflow.python.framework.tensor_shape.as_dimension",
"tensorflow.python.framework.cpp_shape_inference_pb2.CppShapeInferenceInputsNeeded",
"tensorflow.python.framework.tensor_util.constant_value",
"tensorflow.python.framework.errors.raise_exception_on_not_ok_status",
"tensorflow.python.framework.tensor_shape.unknown_shape",
"tensorflow.python.pywrap_tensorflow.RunCppShapeInference",
"tensorflow.python.framework.ops._set_call_cpp_shape_fn"
],
[
"tensorflow.core.protobuf.config_pb2.RunMetadata",
"tensorflow.python.training.basic_session_run_hooks.StepCounterHook",
"tensorflow.python.ops.resources.report_uninitialized_resources",
"tensorflow.core.protobuf.config_pb2.RunOptions",
"tensorflow.python.training.basic_session_run_hooks.SummarySaverHook",
"tensorflow.python.framework.ops.add_to_collection",
"tensorflow.python.training.saver._get_saver_or_default",
"tensorflow.python.training.queue_runner.start_queue_runners",
"tensorflow.python.ops.variables.report_uninitialized_variables",
"tensorflow.python.framework.ops.get_collection",
"tensorflow.python.ops.variables.local_variables_initializer",
"tensorflow.python.training.basic_session_run_hooks.CheckpointSaverHook",
"tensorflow.python.ops.variables.global_variables",
"tensorflow.python.ops.resources.shared_resources",
"tensorflow.python.ops.lookup_ops.tables_initializer",
"tensorflow.python.training.coordinator.Coordinator",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.training.session_run_hook.SessionRunValues",
"tensorflow.python.framework.ops.get_default_graph",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.training.session_run_hook.SessionRunArgs"
],
[
"tensorflow.python.ops.check_ops.assert_proper_iterable",
"tensorflow.python.framework.tensor_shape.TensorShape",
"tensorflow.python.ops.array_ops.shape",
"tensorflow.python.ops.array_ops.concat",
"tensorflow.python.framework.ops.name_scope",
"tensorflow.python.framework.common_shapes.broadcast_shape"
],
[
"tensorflow.python.platform.app.run",
"tensorflow.python.framework.ops.Graph",
"tensorflow.python.platform.tf_logging.info",
"tensorflow.python.pywrap_tensorflow.NewCheckpointReader",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.client.session.Session",
"tensorflow.python.ops.variables.global_variables_initializer",
"tensorflow.python.training.saver.Saver"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"1.4",
"2.6",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.8",
"1.10",
"1.12",
"2.7",
"2.6",
"1.4",
"1.13",
"2.3",
"2.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.2",
"1.2",
"2.10"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"1.2"
]
}
] |
Robert-sktai/skt
|
[
"3075c1f4875f9acba927a53c7a2d0e1f7eb302dc",
"3075c1f4875f9acba927a53c7a2d0e1f7eb302dc"
] |
[
"skt/ye.py",
"skt/mls.py"
] |
[
"from skt.vault_utils import get_secrets\n\n\ndef get_hive_conn():\n from pyhive import hive\n\n hiveserver2 = get_secrets(path=\"ye/hiveserver2\")\n host = hiveserver2[\"ip\"]\n port = hiveserver2[\"port\"]\n user = hiveserver2[\"user\"]\n conn = hive.connect(host, port=port, username=user)\n return conn\n\n\ndef get_hdfs_conn():\n import os\n import pyarrow\n\n os.environ[\"ARROW_LIBHDFS_DIR\"] = \"/usr/hdp/3.0.1.0-187/usr/lib\"\n conn = pyarrow.hdfs.connect(user=\"airflow\")\n return conn\n\n\ndef get_sqlalchemy_engine():\n from sqlalchemy import create_engine\n\n hiveserver2 = get_secrets(path=\"ye/hiveserver2\")\n host = hiveserver2[\"ip\"]\n port = hiveserver2[\"port\"]\n user = hiveserver2[\"user\"]\n return create_engine(f\"hive://{user}@{host}:{port}/tmp\")\n\n\ndef get_pkl_from_hdfs(pkl_path):\n import pickle\n\n conn = get_hdfs_conn()\n byte_object = conn.cat(f\"{pkl_path}\")\n pkl_object = pickle.loads(byte_object)\n return pkl_object\n\n\ndef get_spark(scale=0, queue=None):\n import os\n import uuid\n import tempfile\n from pyspark.sql import SparkSession\n from skt.vault_utils import get_secrets\n\n tmp_uuid = str(uuid.uuid4())\n app_name = f\"skt-{os.environ.get('USER', 'default')}-{tmp_uuid}\"\n if not queue:\n if \"JUPYTERHUB_USER\" in os.environ:\n queue = \"dmig_eda\"\n else:\n queue = \"airflow_job\"\n os.environ[\"ARROW_PRE_0_15_IPC_FORMAT\"] = \"1\"\n\n key = get_secrets(\"gcp/sktaic-datahub/dataflow\")[\"config\"]\n key_file_name = tempfile.mkstemp()[1]\n with open(key_file_name, \"wb\") as key_file:\n key_file.write(key.encode())\n os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = key_file.name\n\n if scale in [1, 2, 3, 4]:\n spark = (\n SparkSession.builder.config(\"spark.app.name\", app_name)\n .config(\"spark.driver.memory\", f\"{scale*8}g\")\n .config(\"spark.executor.memory\", f\"{scale*3}g\")\n .config(\"spark.executor.instances\", f\"{scale*8}\")\n .config(\"spark.driver.maxResultSize\", f\"{scale*4}g\")\n 
.config(\"spark.rpc.message.maxSize\", \"1024\")\n .config(\"spark.yarn.queue\", queue)\n .config(\"spark.ui.enabled\", \"false\")\n .config(\"spark.port.maxRetries\", \"128\")\n .config(\"spark.executorEnv.ARROW_PRE_0_15_IPC_FORMAT\", \"1\")\n .config(\"spark.yarn.appMasterEnv.ARROW_PRE_0_15_IPC_FORMAT\", \"1\")\n .config(\"spark.jars\", \"gs://external_libs/spark/jars/spark-bigquery-with-dependencies_2.11-0.16.1.jar\",)\n .enableHiveSupport()\n .getOrCreate()\n )\n else:\n spark = (\n SparkSession.builder.config(\"spark.app.name\", app_name)\n .config(\"spark.driver.memory\", \"6g\")\n .config(\"spark.executor.memory\", \"8g\")\n .config(\"spark.shuffle.service.enabled\", \"true\")\n .config(\"spark.dynamicAllocation.enabled\", \"true\")\n .config(\"spark.dynamicAllocation.maxExecutors\", \"200\")\n .config(\"spark.driver.maxResultSize\", \"6g\")\n .config(\"spark.rpc.message.maxSize\", \"1024\")\n .config(\"spark.yarn.queue\", queue)\n .config(\"spark.ui.enabled\", \"false\")\n .config(\"spark.port.maxRetries\", \"128\")\n .config(\"spark.executorEnv.ARROW_PRE_0_15_IPC_FORMAT\", \"1\")\n .config(\"spark.yarn.appMasterEnv.ARROW_PRE_0_15_IPC_FORMAT\", \"1\")\n .config(\"spark.jars\", \"gs://external_libs/spark/jars/spark-bigquery-with-dependencies_2.11-0.16.1.jar\",)\n .enableHiveSupport()\n .getOrCreate()\n )\n spark.conf.set(\"spark.sql.execution.arrow.enabled\", \"true\")\n return spark\n\n\ndef hive_execute(query):\n conn = get_hive_conn()\n c = conn.cursor()\n c.execute(query)\n c.close()\n conn.close()\n\n\ndef hive_get_result(query):\n conn = get_hive_conn()\n c = conn.cursor()\n c.execute(query)\n result = c.fetchall()\n c.close()\n conn.close()\n return result\n\n\ndef hive_to_pandas(query, scale=0):\n if scale == 1:\n import pandas\n\n conn = get_hive_conn()\n df = pandas.read_sql(query, conn)\n df.info()\n conn.close()\n return df\n\n import uuid\n\n tmp_id = str(uuid.uuid4()).replace(\"-\", \"_\")\n ctas = f\"CREATE TABLE dumbo.{tmp_id} stored as 
parquet as {query}\"\n conn = get_hive_conn()\n c = conn.cursor()\n c.execute(\"set parquet.column.index.access=false\")\n c.execute(ctas)\n hdfs = get_hdfs_conn()\n table_path = hdfs.ls(f\"/warehouse/tablespace/managed/hive/dumbo.db/{tmp_id}\")[0]\n hdfs.close()\n df = parquet_to_pandas(table_path)\n c.execute(f\"DROP TABLE dumbo.{tmp_id}\")\n c.close()\n conn.close()\n return df\n\n\ndef parquet_to_pandas(hdfs_path):\n from pyarrow import parquet\n\n hdfs = get_hdfs_conn()\n df = parquet.read_table(hdfs_path, filesystem=hdfs).to_pandas()\n df.info()\n return df\n\n\ndef pandas_to_parquet(pandas_df, hdfs_path, spark):\n df = spark.createDataFrame(pandas_df)\n df.write.mode(\"overwrite\").parquet(hdfs_path)\n\n\ndef slack_send(\n text=\"This is default text\",\n username=\"SKT\",\n channel=\"#leavemealone\",\n icon_emoji=\":large_blue_circle:\",\n blocks=None,\n dataframe=False,\n):\n import requests\n from skt.vault_utils import get_secrets\n\n if dataframe:\n from tabulate import tabulate\n\n text = \"```\" + tabulate(text, tablefmt=\"simple\", headers=\"keys\") + \"```\"\n\n token = get_secrets(\"slack\")[\"bot_token\"][\"airflow\"]\n proxy = get_secrets(\"proxy\")[\"proxy\"]\n proxies = {\n \"http\": proxy,\n \"https\": proxy,\n }\n headers = {\n \"Content-Type\": \"application/json;charset=utf-8\",\n \"Authorization\": f\"Bearer {token}\",\n }\n json_body = {\n \"username\": username,\n \"channel\": channel,\n \"text\": text,\n \"blocks\": blocks,\n \"icon_emoji\": icon_emoji,\n }\n r = requests.post(\"https://www.slack.com/api/chat.postMessage\", proxies=proxies, headers=headers, json=json_body,)\n r.raise_for_status()\n if not r.json()[\"ok\"]:\n raise Exception(r.json())\n\n\ndef get_github_util():\n from skt.github_utils import GithubUtil\n\n github_token = get_secrets(\"github/sktaiflow\")[\"token\"]\n proxy = get_secrets(\"proxy\")[\"proxy\"]\n proxies = {\n \"http\": proxy,\n \"https\": proxy,\n }\n g = GithubUtil(github_token, proxies)\n return 
g\n\n\ndef _write_to_parquet_via_spark(pandas_df, hdfs_path):\n spark = get_spark()\n spark_df = spark.createDataFrame(pandas_df)\n spark_df.write.mode(\"overwrite\").parquet(hdfs_path)\n\n\ndef _write_to_parquet(pandas_df, hdfs_path):\n import pyarrow as pa\n import pyarrow.parquet as pq\n\n # Read Parquet INT64 timestamp issue:\n # https://issues.apache.org/jira/browse/HIVE-21215\n if \"datetime64[ns]\" in pandas_df.dtypes.tolist():\n _write_to_parquet_via_spark(pandas_df, hdfs_path)\n return\n\n pa_table = pa.Table.from_pandas(pandas_df)\n hdfs_conn = get_hdfs_conn()\n try:\n pq.write_to_dataset(pa_table, root_path=hdfs_path, filesystem=hdfs_conn)\n finally:\n hdfs_conn.close()\n\n\ndef _write_df(pandas_df, schema_name, table_name, hdfs_path, engine, cursor, tmp_table_name):\n import sqlalchemy.exc\n\n cursor.execute(f\"drop table if exists {schema_name}.{tmp_table_name}\")\n try:\n pandas_df.to_sql(tmp_table_name, engine, schema=schema_name, if_exists=\"replace\", index=False)\n except sqlalchemy.exc.ProgrammingError:\n # Hive bulk insert issue:\n # https://github.com/dropbox/PyHive/issues/343\n pass\n\n cursor.execute(f\"drop table if exists {schema_name}.{table_name}\")\n if hdfs_path is None:\n cursor.execute(\n f\"\"\"create table {schema_name}.{table_name}\n like {schema_name}.{tmp_table_name}\n stored as parquet\"\"\"\n )\n cursor.execute(f\"show create table {schema_name}.{table_name}\")\n result = cursor.fetchall()\n managed_hdfs_path = list(filter(lambda row: row[0].strip().find(\"hdfs://\") == 1, result))[0][0].strip()[1:-1]\n _write_to_parquet(pandas_df, managed_hdfs_path)\n else:\n cursor.execute(\n f\"\"\"create external table {schema_name}.{table_name}\n like {schema_name}.{tmp_table_name}\n stored as parquet\n location '{hdfs_path}'\"\"\"\n )\n\n\ndef write_df_to_hive(pandas_df, schema_name, table_name, hdfs_path=None):\n \"\"\"\n Exports a Panadas dataframe into a table in Hive.\n\n Example:\n write_df_to_hive(pandas_df1, \"my_schema\", 
\"my_table1\")\n write_df_to_hive(pandas_df2, \"my_schema\", \"my_table2\")\n write_df_to_hive(pandas_df1, \"my_schema\", \"my_table3\",\n hdfs_path=\"hdfs://.../my_schema.db/my_table1\")\n\n Parameters\n ----------\n pandas_df : an ojbect of Pandas Dataframe\n schema_name : str\n A target schema name of Hive\n table_name : str\n A target table name of Hive\n hdfs_path : str, default None\n A path of Hadoop file system as an optional parameter.\n It will be used to create an external table. If hdfs_path\n is not None, data in the dataframe will not be converted.\n A metadata in the dataframe is just used to create a Hive\n table.\n \"\"\"\n engine = get_sqlalchemy_engine()\n conn = get_hive_conn()\n cursor = conn.cursor()\n\n import hashlib\n\n tmp_table_name = hashlib.sha1(str(f\"{schema_name}.{table_name}\").encode(\"utf-8\")).hexdigest()\n\n try:\n _write_df(pandas_df, schema_name, table_name, hdfs_path, engine, cursor, tmp_table_name)\n finally:\n cursor.execute(f\"drop table if exists {schema_name}.{tmp_table_name}\")\n cursor.close()\n conn.close()\n",
"from pathlib import Path\nfrom typing import Dict, Any\nfrom enum import Enum\n\nimport requests\nimport pandas as pd\nimport os\n\nfrom skt.vault_utils import get_secrets\n\n\nMLS_MODEL_DIR = os.path.join(Path.home(), \"mls_temp_dir\")\nMODEL_BINARY_NAME = \"model.joblib\"\nMODEL_TAR_NAME = \"model.tar.gz\"\nMODEL_META_NAME = \"model.json\"\nS3_DEFAULT_PATH = get_secrets(\"mls\")[\"s3_model_registry_path\"]\n\nEDD_OPTIONS = get_secrets(\"mls\")[\"edd_options\"]\n\nMLS_COMPONENTS_API_URL = \"/api/v1/components\"\nMLS_META_API_URL = \"/api/v1/meta_tables\"\nMLS_MLMODEL_API_URL = \"/api/v1/models\"\n\n\ndef get_mls_meta_table_client(env=\"stg\", user=\"reco\"):\n from sktmls.meta_tables.meta_table import MetaTableClient\n from sktmls import MLSENV\n\n if env == \"prd\":\n env = MLSENV.PRD\n else:\n env = MLSENV.STG\n\n secrets = get_secrets(path=\"mls\")\n if user != \"reco\":\n user_id = secrets.get(f\"{user}_id\")\n user_pass = secrets.get(f\"{user}_pass\")\n else:\n user_id = secrets.get(\"reco_id\")\n user_pass = secrets.get(\"reco_pass\")\n\n if not user_id or not user_pass:\n raise Exception(\"No ID or Password for the user {user}\")\n\n return MetaTableClient(env=env, username=user_id, password=user_pass)\n\n\ndef create_or_update_meta_table(table_name, schema=None, env=\"stg\", user=\"reco\"):\n c = get_mls_meta_table_client(env=env, user=user)\n if c.meta_table_exists(name=table_name):\n t = c.get_meta_table(name=table_name)\n if schema:\n c.update_meta_table(meta_table=t, schema=schema)\n else:\n c.create_meta_table(name=table_name, schema=schema)\n\n\ndef upsert_meta_table(table_name, items_dict, env=\"stg\", user=\"reco\"):\n c = get_mls_meta_table_client(env=env, user=user)\n t = c.get_meta_table(name=table_name)\n items = c.create_meta_items(meta_table=t, items_dict=items_dict)\n return len(items)\n\n\ndef set_model_name(comm_db, params, user=\"reco\", edd: bool = False):\n secret = get_secrets(\"mls\")\n token = secret.get(\"user_token\").get(user)\n 
if comm_db[-3:] == \"dev\": # stg\n url = secret[\"ab_onprem_stg_url\"] if edd else secret[\"ab_stg_url\"]\n url = f\"{url}{MLS_COMPONENTS_API_URL}\"\n else: # prd\n url = secret[\"ab_onprem_prd_url\"] if edd else secret[\"ab_prd_url\"]\n url = f\"{url}{MLS_COMPONENTS_API_URL}\"\n requests.post(\n url, json=params, headers={\"Authorization\": f\"Basic {{{token}}}\"},\n )\n\n\ndef get_all_recent_model_path(comm_db, user=\"reco\", edd: bool = False):\n secret = get_secrets(\"mls\")\n token = secret.get(\"user_token\").get(user)\n if comm_db[-3:] == \"dev\": # stg\n url = secret[\"ab_onprem_stg_url\"] if edd else secret[\"ab_stg_url\"]\n url = f\"{url}{MLS_COMPONENTS_API_URL}\"\n else: # prd\n url = secret[\"ab_onprem_prd_url\"] if edd else secret[\"ab_prd_url\"]\n url = f\"{url}{MLS_COMPONENTS_API_URL}\"\n\n response = requests.get(url, headers={\"Authorization\": f\"Basic {{{token}}}\"}).json().get(\"results\")\n\n results = {component.get(\"name\"): component.get(\"info\") for component in response if component.get(\"is_latest\")}\n\n return results\n\n\ndef get_recent_model_path(comm_db, model_key, user=\"reco\", edd: bool = False):\n results = get_all_recent_model_path(comm_db, user, edd)\n return results.get(model_key)\n\n\ndef get_model_name(key, user=\"reco\", edd: bool = False):\n results = get_all_recent_model_path(\"prd\", user, edd)\n return results.get(key)\n\n\nclass ModelLibrary(Enum):\n LIGHTGBM = \"lightgbm\"\n XGBOOST = \"xgboost\"\n\n\nclass AWSENV(Enum):\n STG = \"stg\"\n PRD = \"prd\"\n DEV = \"dev\"\n\n\nclass MLSModelError(Exception):\n def __init__(self, msg):\n super().__init__(msg)\n\n\ndef get_meta_table(\n meta_table: str, aws_env: AWSENV = AWSENV.STG.value, user=\"reco\", edd: bool = False\n) -> Dict[str, Any]:\n \"\"\"\n Get a meta_table information\n Args. 
:\n - meta_table : (str) the name of meta_table\n - aws_env : (str) AWS ENV in 'stg / prd' (default is 'stg')\n - user : (str) the name of user (default is 'reco')\n - edd : (bool) True if On-prem env is on EDD (default is False)\n Returns :\n - Dictionary value of meta_table (id / name / description / schema / items / created_at / updated_at)\n \"\"\"\n assert type(meta_table) == str\n assert type(aws_env) == str\n\n secret = get_secrets(\"mls\")\n token = secret.get(\"user_token\").get(user)\n\n url = get_secrets(\"mls\")[f\"ab_{'onprem_' if edd else ''}{aws_env}_url\"]\n url = f\"{url}{MLS_META_API_URL}/{meta_table}\"\n\n response = requests.get(url, headers={\"Authorization\": f\"Basic {{{token}}}\"}).json()\n results = response.get(\"results\")\n\n if not results:\n raise MLSModelError(response.get(\"error\"))\n else:\n return results\n\n\ndef create_meta_table_item(\n meta_table: str,\n item_name: str,\n item_dict: Dict[str, Any],\n aws_env: AWSENV = AWSENV.STG.value,\n user=\"reco\",\n edd: bool = False,\n) -> None:\n \"\"\"\n Create a meta_item\n Args. 
:\n - meta_table : (str) the name of meta_table\n - item_name : (str) the name of meta_item to be added\n - item_dict : (dict) A dictionary type (item-value) value to upload to or update of the item\n - aws_env : (str) AWS ENV in 'stg / prd' (default is 'stg')\n - user : (str) the name of user (default is 'reco')\n - edd : (bool) True if On-prem env is on EDD (default is False)\n \"\"\"\n assert type(meta_table) == str\n assert type(item_name) == str\n assert type(item_dict) == dict\n assert type(aws_env) == str\n\n secret = get_secrets(\"mls\")\n token = secret.get(\"user_token\").get(user)\n\n meta_table_info = get_meta_table(meta_table, aws_env, user, edd)\n\n values_data = dict()\n for field_name, field_spec in meta_table_info[\"schema\"].items():\n values_data[field_name] = item_dict.get(field_name)\n\n request_data = dict()\n request_data[\"name\"] = item_name\n request_data[\"values\"] = values_data\n\n url = get_secrets(\"mls\")[f\"ab_{'onprem_' if edd else ''}{aws_env}_url\"]\n url = f\"{url}{MLS_META_API_URL}/{meta_table}/meta_items\"\n\n response = requests.post(url, json=request_data, headers={\"Authorization\": f\"Basic {{{token}}}\"}).json()\n results = response.get(\"results\")\n\n if not results:\n raise MLSModelError(response.get(\"error\"))\n\n\ndef update_meta_table_item(\n meta_table: str,\n item_name: str,\n item_dict: Dict[str, Any],\n aws_env: AWSENV = AWSENV.STG.value,\n user=\"reco\",\n edd: bool = False,\n) -> None:\n \"\"\"\n Update a meta_item\n Args. 
:\n - meta_table : (str) the name of meta_table\n - item_name : (str) the name of meta_item to be added\n - item_dict : (dict) A dictionary type (item-value) value to upload to or update of the item\n - aws_env : (str) AWS ENV in 'stg / prd' (default is 'stg')\n - user : (str) the name of user (default is 'reco')\n - edd : (bool) True if On-prem env is on EDD (default is False)\n \"\"\"\n assert type(meta_table) == str\n assert type(item_name) == str\n assert type(item_dict) == dict\n assert type(aws_env) == str\n\n secret = get_secrets(\"mls\")\n token = secret.get(\"user_token\").get(user)\n\n meta_table_info = get_meta_table(meta_table, aws_env, user, edd)\n\n values_data = dict()\n for field_name, field_spec in meta_table_info[\"schema\"].items():\n values_data[field_name] = item_dict.get(field_name)\n\n request_data = dict()\n request_data[\"name\"] = item_name\n request_data[\"values\"] = values_data\n\n url = get_secrets(\"mls\")[f\"ab_{'onprem_' if edd else ''}{aws_env}_url\"]\n url = f\"{url}{MLS_META_API_URL}/{meta_table}/meta_items/{item_name}\"\n\n response = requests.put(url, json=request_data, headers={\"Authorization\": f\"Basic {{{token}}}\"}).json()\n results = response.get(\"results\")\n\n if not results:\n raise MLSModelError(response.get(\"error\"))\n\n\ndef get_meta_table_item(\n meta_table: str, item_name: str, aws_env: AWSENV = AWSENV.STG.value, user=\"reco\", edd: bool = False\n) -> Dict[str, Any]:\n \"\"\"\n Get a meta_table information\n Args. 
:\n - meta_table : (str) the name of meta_table\n - item_name : (str) the name of meta_item to be added\n - aws_env : (str) AWS ENV in 'stg / prd' (default is 'stg')\n - user : (str) the name of user (default is 'reco')\n - edd : (bool) True if On-prem env is on EDD (default is False)\n Returns :\n - A dictionary type (item-value) value of the item_meta\n \"\"\"\n assert type(meta_table) == str\n assert type(item_name) == str\n assert type(aws_env) == str\n\n secret = get_secrets(\"mls\")\n token = secret.get(\"user_token\").get(user)\n\n url = get_secrets(\"mls\")[f\"ab_{'onprem_' if edd else ''}{aws_env}_url\"]\n url = f\"{url}{MLS_META_API_URL}/{meta_table}/meta_items/{item_name}\"\n\n response = requests.get(url, headers={\"Authorization\": f\"Basic {{{token}}}\"}).json()\n results = response.get(\"results\")\n\n if not results:\n raise MLSModelError(response.get(\"error\"))\n else:\n return results\n\n\ndef meta_table_to_pandas(meta_table: str, aws_env: AWSENV = AWSENV.STG.value, user=\"reco\", edd: bool = False) -> Any:\n \"\"\"\n Get a meta_table as pandas dataframe\n Args. 
:\n - meta_table : (str) the name of meta_table\n - aws_env : (str) AWS ENV in 'stg / prd' (default is 'stg')\n - user : (str) the name of user (default is 'reco')\n - edd : (bool) True if On-prem env is on EDD (default is False)\n Returns :\n - A Pandas dataframe type of the item_meta\n \"\"\"\n assert type(meta_table) == str\n assert type(aws_env) == str\n\n secret = get_secrets(\"mls\")\n token = secret.get(\"user_token\").get(user)\n\n url = get_secrets(\"mls\")[f\"ab_{'onprem_' if edd else ''}{aws_env}_url\"]\n url = f\"{url}{MLS_META_API_URL}/{meta_table}\"\n\n response = requests.get(url, headers={\"Authorization\": f\"Basic {{{token}}}\"}).json()\n\n if not response.get(\"results\"):\n raise MLSModelError(f\"No meta_table '{meta_table}' exists on AWS {aws_env}\")\n\n items = response[\"results\"][\"items\"]\n key = pd.DataFrame.from_records(items)[\"name\"]\n values = pd.DataFrame.from_records(pd.DataFrame.from_records(items)[\"values\"])\n\n df = pd.concat([key, values], axis=1)\n\n return df\n\n\ndef pandas_to_meta_table(\n method: str,\n meta_table: str,\n df: pd.DataFrame,\n key: str,\n values: list,\n aws_env: AWSENV = AWSENV.STG.value,\n user=\"reco\",\n edd: bool = False,\n) -> None:\n \"\"\"\n Create or Update items of a meta_table from Pandas Dataframe\n Args. 
:\n - method : (str) requests method 'create' or 'update'\n - meta_table : (str) MLS meta table name\n - df : (pd.DataFrame) input table\n - key : (str) key column in dataframe\n - values : (list) Dataframe columns for input\n - aws_env : (str) AWS ENV in 'stg / prd' (default is 'stg')\n - user : (str) the name of user (default is 'reco')\n - edd : (bool) True if On-prem env is on EDD (default is False)\n \"\"\"\n assert type(aws_env) == str\n assert method in [\"create\", \"update\"]\n assert type(meta_table) == str\n assert type(df) == pd.core.frame.DataFrame\n assert type(key) == str\n assert type(values) == list\n\n url = get_secrets(\"mls\")[f\"ab_{'onprem_' if edd else ''}{aws_env}_url\"]\n url = f\"{url}{MLS_META_API_URL}/{meta_table}/meta_items\"\n\n def to_json(x):\n insert_dict = {}\n insert_dict[\"name\"] = x[key]\n insert_dict[\"values\"] = {}\n\n for value in values:\n insert_dict[\"values\"][value] = x[value]\n\n return insert_dict\n\n json_series = df.apply(lambda x: to_json(x), axis=1)\n\n for meta in json_series:\n if method == \"create\":\n create_meta_table_item(meta_table, meta.get(\"name\"), meta.get(\"values\"), aws_env, user)\n else:\n update_meta_table_item(meta_table, meta.get(\"name\"), meta.get(\"values\"), aws_env, user)\n\n\ndef get_ml_model(\n user: str, model_name: str, model_version: str, aws_env: AWSENV = AWSENV.STG.value, edd: bool = False\n) -> Dict[str, Any]:\n \"\"\"\n Get an MLModel\n Args. 
:\n - user : (str) the name of a MLModel user\n - model_name : (str) the name of MLModel\n - model_version : (str) the version of MLModel\n - aws_env : (str) AWS ENV in 'stg / prd' (default is 'stg')\n - edd : (bool) True if On-prem env is on EDD (default is False)\n Returns :\n - Dictionary value of MLModel\n \"\"\"\n assert type(user) == str\n assert type(model_name) == str\n assert type(model_version) == str\n assert type(aws_env) == str\n\n url = get_secrets(\"mls\")[f\"ab_{'onprem_' if edd else ''}{aws_env}_url\"]\n url = f\"{url}{MLS_MLMODEL_API_URL}/{model_name}/versions/{model_version}\"\n\n response = requests.get(url, params={\"user\": user}).json()\n results = response.get(\"results\")\n\n if not results:\n raise MLSModelError(f\"No MLModel for user: {user} / model_name: {model_name} / model_version: {model_version}\")\n else:\n return results[0]\n\n\ndef get_ml_model_meta(\n user: str, model_name: str, model_version: str, aws_env: AWSENV = AWSENV.STG.value, edd: bool = False\n) -> Dict[str, Any]:\n \"\"\"\n Get a list of MLModel meta\n Args. 
:\n - user : (str) the name of a MLModel user\n - model_name : (str) the name of MLModel\n - model_version : (str) the version of MLModel\n - aws_env : (str) AWS ENV in 'stg / prd' (default is 'stg')\n - edd : (bool) True if On-prem env is on EDD (default is False)\n Returns :\n - Dictionary value of model_meta\n \"\"\"\n assert type(user) == str\n assert type(model_name) == str\n assert type(model_version) == str\n assert type(aws_env) == str\n\n url = get_secrets(\"mls\")[f\"ab_{'onprem_' if edd else ''}{aws_env}_url\"]\n url = f\"{url}{MLS_MLMODEL_API_URL}/{model_name}/versions/{model_version}/meta\"\n\n response = requests.get(url, params={\"user\": user}).json()\n results = response.get(\"results\")\n\n if not results:\n raise MLSModelError(f\"No MLModel for user: {user} / model_name: {model_name} / model_version: {model_version}\")\n else:\n return results[0].get(\"model_meta\")\n\n\ndef update_ml_model_meta(\n user: str,\n model_name: str,\n model_version: str,\n model_meta_dict: Dict[str, Any],\n aws_env: AWSENV = AWSENV.STG.value,\n edd: bool = False,\n) -> None:\n \"\"\"\n Update(or Create) model_meta\n Args. :\n - user : (str) the name of a MLModel user\n - model_name : (str) the name of MLModel\n - model_version : (str) the version of MLModel\n - model_meta_dict : (dict) the version of MLModel\n - aws_env : (str) AWS ENV in 'stg / prd' (default is 'stg')\n - edd : (bool) True if On-prem env is on EDD (default is False)\n \"\"\"\n assert type(model_name) == str\n assert type(model_version) == str\n assert type(model_meta_dict) == dict\n assert type(aws_env) == str\n\n url = get_secrets(\"mls\")[f\"ab_{'onprem_' if edd else ''}{aws_env}_url\"]\n url = f\"{url}{MLS_MLMODEL_API_URL}/{model_name}/versions/{model_version}/meta\"\n\n request_data = dict()\n request_data[\"user\"] = user\n request_data[\"model_meta\"] = model_meta_dict\n\n requests.patch(url, json=request_data).json()\n"
] |
[
[
"pandas.read_sql"
],
[
"pandas.DataFrame.from_records",
"pandas.concat"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
gzyszuuow/BusBunching
|
[
"e9b01a62e422ae6525d9287bdf46c25aacdf9c6c",
"e9b01a62e422ae6525d9287bdf46c25aacdf9c6c"
] |
[
"12.20/model2.py",
"Code/GetDataFromBusRoute.py"
] |
[
"from keras.models import Model\r\nfrom keras.layers import Input\r\nfrom keras.layers import LSTM\r\nfrom keras.layers import Dense\r\nfrom keras.layers import GRU,Embedding,concatenate,Reshape\r\nimport numpy as np\r\nimport keras\r\n#from loaddata2 import *\r\n\r\npath1 = \"C:\\\\Users\\\\bdu\\\\Desktop\\\\gzy\\\\BusBunching\\\\BusRoute400\\\\Test\\\\y_target_headway.npy\"\r\npath2 = \"C:\\\\Users\\\\bdu\\\\Desktop\\\\gzy\\\\BusBunching\\\\BusRoute400\\\\Test\\\\X2_headways.npy\"\r\npath3 = \"C:\\\\Users\\\\bdu\\\\Desktop\\\\gzy\\\\BusBunching\\\\BusRoute400\\\\Test\\\\X1_stopfeatures.npy\"\r\n\r\ny_target_headway = np.load(path1)\r\nX2_headways = np.load(path2)\r\nX1_stopfeatures = np.load(path3)\r\n\r\nStopID = []\r\nRoutesNum = []\r\nTimFeatures = []\r\nStopFeatures = []\r\n\r\nfor i in range(X1_stopfeatures.shape[0]):\r\n ls1 = []\r\n ls2 = []\r\n ls3 = []\r\n ls4 = []\r\n for index in range(0,28):\r\n ls1_each = []\r\n ls2_each = []\r\n ls3_each = []\r\n ls4_each = []\r\n\r\n l = list(X1_stopfeatures[i][index])\r\n\r\n ls1_each.append(l[0])\r\n ls2_each.append(l[1])\r\n ls3_each.append(l[3])\r\n\r\n for i in range(X1_stopfeatures.shape[2]):\r\n if i not in [0,1,3]:\r\n ls4_each.append(l[i])\r\n ls4.append(ls4_each)\r\n\r\n ls1.append(ls1_each)\r\n ls2.append(ls2_each)\r\n ls3.append(ls3_each)\r\n StopID.append(ls1)\r\n RoutesNum.append(ls2)\r\n TimFeatures.append(ls3)\r\n StopFeatures.append(ls4)\r\n\r\n\r\n\r\n#--------------------------------------------------------------------------------\r\nx = []\r\nwhile(len(x)<y_target_headway.shape[0]):\r\n x_ = np.random.randint(0,y_target_headway.shape[0],1)\r\n for val in x_:\r\n if val not in x:\r\n x.append(val)\r\n\r\ny_target_headway = y_target_headway.tolist()\r\nX2_headways = X2_headways.tolist()\r\nStopID_shuffle = []\r\nRoutesNum_shuffle = []\r\nTimFeatures_shuffle = []\r\nStopFeatures_shuffle = []\r\ny_target_headway_shuffle = []\r\nX2_headways_shuffle = []\r\nfor val in x:\r\n 
X2_headways_shuffle.append(X2_headways[val])\r\n y_target_headway_shuffle.append(y_target_headway[val])\r\n RoutesNum_shuffle.append(RoutesNum[val])\r\n TimFeatures_shuffle.append(TimFeatures[val])\r\n StopFeatures_shuffle.append(StopFeatures[val])\r\n StopID_shuffle.append(StopID[val])\r\n\r\n#--------------------------------------------------------------------------------\r\n#StopID = np.array(StopID)\r\n#RoutesNum = np.array(RoutesNum)\r\n#TimFeatures = np.array(TimFeatures)\r\n#StopFeatures = np.array(StopFeatures)\r\n\r\nX2_headways = np.array(X2_headways_shuffle)\r\ny_target_headway = np.array(y_target_headway_shuffle)\r\nRoutesNum = np.array(RoutesNum_shuffle)\r\nTimFeatures = np.array(TimFeatures_shuffle)\r\nStopFeatures = np.array(StopFeatures_shuffle)\r\nStopID = np.array(StopID_shuffle)\r\n\r\n\r\nprint(StopID.shape)\r\nprint(RoutesNum.shape)\r\nprint(TimFeatures.shape)\r\nprint(StopFeatures.shape)\r\n\r\n\r\nn_headway_features = 28\r\nn_stop_features = X1_stopfeatures.shape[2]\r\n\r\nn_units = 64\r\nepochs = 30\r\n\r\nlearning_rate = 0.01\r\ndecay = 0 # Learning rate decay\r\noptimiser = keras.optimizers.Adam(lr=learning_rate, decay=decay)\r\n\r\nbatch_size = 64\r\n\r\nMaxStopID = 27\r\nMaxRoutesNum = 95\r\nMaxTimeFeature = 3\r\nStopNumber = 28\r\n\r\n#define training encoder\r\nStopIDInput = Input(shape=(StopNumber,1))\r\nRoutesNumInput = Input(shape=(StopNumber,1))\r\nTimFeatureInput = Input(shape=(StopNumber,1))\r\n\r\nStopIDInputEmbed = Embedding(MaxStopID+1,4)(StopIDInput)\r\nRoutesNumInputembed = Embedding(MaxRoutesNum+1,4)(RoutesNumInput)\r\nTimFeatureInputembed = Embedding(MaxTimeFeature+1,4)(TimFeatureInput)\r\nStopVarietyTimeFeaturesInput = Input(shape=(StopNumber,n_stop_features - 3))\r\n\r\nStopIDInputEmbed = Reshape((StopNumber,4))(StopIDInputEmbed)\r\nRoutesNumInputembed = Reshape((StopNumber,4))(RoutesNumInputembed)\r\nTimFeatureInputembed = Reshape((StopNumber,4))(TimFeatureInputembed)\r\n\r\nencoder_inputs = 
concatenate([StopIDInputEmbed, RoutesNumInputembed,TimFeatureInputembed,StopVarietyTimeFeaturesInput],axis=-1)\r\nencoder = LSTM(n_units, return_state=True)\r\nencoder_outputs, state_h, state_c = encoder(encoder_inputs)\r\nencoder_states = [state_h, state_c]\r\n# define training decoder\r\ndecoder_inputs = Input(shape=(None, n_headway_features))\r\ndecoder_lstm = LSTM(n_units, return_sequences=True, return_state=True)\r\ndecoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)\r\ndecoder_dense = Dense(n_headway_features,activation='relu')\r\ndecoder_outputs = decoder_dense(decoder_outputs)\r\n\r\nmodel = Model(inputs = [StopIDInput,RoutesNumInput,TimFeatureInput,StopVarietyTimeFeaturesInput,decoder_inputs],outputs = decoder_outputs)\r\nmodel.compile(optimizer=optimiser, loss='mse',metrics=['acc'])\r\nmodel.fit([StopID,RoutesNum,TimFeatures,StopFeatures,X2_headways],y_target_headway,batch_size = batch_size,epochs = epochs,validation_split=0.1)\r\n\r\n\r\n\r\n\r\n'''\r\n#Test Model\r\nencoder_model = Model([StopIDInput,RoutesNumInput,TimFeatureInput,StopVarietyTimeFeaturesInput], encoder_states)\r\n# define inference decoder\r\ndecoder_state_input_h = Input(shape=(n_units,))\r\ndecoder_state_input_c = Input(shape=(n_units,))\r\ndecoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]\r\ndecoder_outputs, state_h, state_c = decoder_lstm(decoder_inputs, initial_state=decoder_states_inputs)\r\ndecoder_states = [state_h, state_c]\r\ndecoder_outputs = decoder_dense(decoder_outputs)\r\ndecoder_model = Model([decoder_inputs] + decoder_states_inputs, [decoder_outputs] + decoder_states)\r\n'''\r\n#state = encoder_model.predict(source)\r\n\r\n#------------------------------------------------------------------------------------------\r\n'''\r\nn_headway_features = 28\r\nn_stop_features = X1_stopfeatures.shape[2]\r\nn_units = 64\r\nepochs = 30\r\nlearning_rate = 0.01\r\ndecay = 0 # Learning rate decay\r\noptimiser = 
keras.optimizers.Adam(lr=learning_rate, decay=decay)\r\nbatch_size = 64\r\n# define training encoder\r\nencoder_inputs = Input(shape=(None, n_stop_features))\r\nencoder = LSTM(n_units, return_state=True)\r\nencoder_outputs, state_h, state_c = encoder(encoder_inputs)\r\nencoder_states = [state_h, state_c]\r\n# define training decoder\r\ndecoder_inputs = Input(shape=(None, n_headway_features))\r\nprint(decoder_inputs)\r\nprint(decoder_inputs[:,:,0])\r\ndecoder_lstm = LSTM(n_units, return_sequences=True, return_state=True)\r\ndecoder_outputs, _, _ = decoder_lstm(decoder_inputs, initial_state=encoder_states)\r\ndecoder_dense = Dense(n_headway_features,activation='relu')\r\ndecoder_outputs = decoder_dense(decoder_outputs)\r\nmodel = Model(inputs = [encoder_inputs, decoder_inputs],outputs = decoder_outputs)\r\n#model.fit([X1_train_stopfeatures,X2_train_headway],y_train_target_headway,batch_size = batch_size,epochs = epochs,validation_split=0.2)\r\nprint('-------------------------------------------------------------')\r\nx = model.predict([X1_test_stopfeatures,X2_test_headway])\r\nallnum_real = 0\r\nallnum_pre = 0\r\naccnum = 0\r\noffset = 0\r\nallnum = 0\r\nthreshold_time = float(3/30)\r\nfor i in range(0,x.shape[0]):\r\n for index in range(0,n_headway_features):\r\n allnum+=1\r\n offset+=abs(list(y_test_target_headway[i,0,:])[index] - list(x[i,0,:])[index])\r\n if list(y_test_target_headway[i,0,:])[index] <= threshold_time:\r\n allnum_real+=1\r\n if list(x[i,0,:])[index] <= threshold_time:\r\n allnum_pre+=1\r\n if (list(x[i,0,:])[index] <= threshold_time) and (list(y_test_target_headway[i,0,:])[index] <= threshold_time):\r\n accnum+=1\r\nprint(\"allnum_real:\")\r\nprint(allnum_real)\r\nprint(\"allnum_pre:\")\r\nprint(allnum_pre)\r\nprint(\"accnum:\")\r\nprint(accnum)\r\nprint()\r\nprint()\r\nprint(offset/allnum)\r\n'''\r\n\r\n\r\n",
"import numpy as np\r\nimport pandas as pd\r\nfrom pandas import DataFrame, Series\r\nimport random\r\nimport time\r\nimport math\r\nimport datetime as dt\r\nimport random\r\nimport copy\r\nfrom datetime import datetime\r\nfrom time import mktime\r\nimport os\r\nimport json\r\n\r\nbusid = 412\r\npath = \"C:\\\\Users\\\\bdu\\\\Desktop\\\\gzy\\\\BusBunching\\\\BusData\\\\\"+str(busid)+\".csv\"\r\nopaldata = pd.read_csv(path)\r\n\r\nopaldata = opaldata[[\"TRIP_ID\",\"JS_STRT_DT_FK\",\"TAG1_TM\",\"TAG1_TS_NUM\",\"TAG1_TS_NM\",\"TAG1_LAT_VAL\", \"TAG1_LONG_VAL\",\"TAG2_TM\",\"TAG2_TS_NUM\",\"TAG2_TS_NM\",\"TAG2_LAT_VAL\", \"TAG2_LONG_VAL\"]]\r\n\r\n#drop the outliers\r\nopaldata = opaldata[~opaldata[\"TAG1_TS_NUM\"].isin([-1])]\r\nopaldata = opaldata[~opaldata[\"TAG2_TS_NUM\"].isin([-1])]\r\n\r\n\r\n'''\r\nprint(list(set(list(opaldata[\"TAG1_TS_NUM\"]))))\r\nprint(len(list(set(list(opaldata[\"TAG1_TS_NUM\"])))))\r\nprint()\r\nprint(list(set(list(opaldata[\"TAG2_TS_NUM\"]))))\r\nprint(len(list(set(list(opaldata[\"TAG2_TS_NUM\"])))))\r\n\r\nprint()\r\nprint()\r\nprint()\r\nstopsname = list(set(list(opaldata[\"TAG1_TS_NM\"])))\r\nprint(stopsname)\r\nprint(len(stopsname))\r\n'''\r\n#看一下每个站上车人数和下车人数各为多少\r\nstops_tapon = list(set(list(opaldata[\"TAG1_TS_NUM\"])))\r\nstops_tapoff = list(set(list(opaldata[\"TAG2_TS_NUM\"])))\r\nl = [i for i in stops_tapon if i in stops_tapoff]\r\ntapon = {}\r\ntapoff = {}\r\nfor i in l:\r\n grouped_bytapon = opaldata.groupby(\"TAG1_TS_NUM\")\r\n for name_tapon,group in grouped_bytapon:\r\n tapon[name_tapon] = group.shape[0]\r\n grouped_bytapoff = opaldata.groupby(\"TAG2_TS_NUM\")\r\n for name_tapoff,group in grouped_bytapon:\r\n tapoff[name_tapoff] = group.shape[0]\r\n #print(\"-----------------------------\")\r\n #print() \r\nprint(tapon)\r\nprint()\r\nprint(tapoff)\r\n\r\n#看一下不正常站点的地理位置\r\nstops = [5176,7364,8245,204219,204212]\r\n#for i in stops:\r\npartdata = 
opaldata[opaldata[\"TAG1_TS_NUM\"].isin([5176,7364,8245,204219,204212])]\r\nprint(partdata)\r\nprint(partdata.shape)\r\nprint()\r\nprint()\r\n#筛选出上下车乘客人数超过thresholdNum的站台\r\ncondidatestops = []\r\nthresholdNum = 10000\r\nfor stop in l:\r\n num_tapon = opaldata[opaldata[\"TAG1_TS_NUM\"] == stop].shape[0]\r\n num_tapoff = opaldata[opaldata[\"TAG2_TS_NUM\"] == stop].shape[0]\r\n if num_tapon + num_tapoff >= thresholdNum:\r\n condidatestops.append(stop)\r\nprint(condidatestops)\r\nprint(len(condidatestops))\r\nprint()\r\nprint(opaldata[opaldata[\"TAG1_TS_NUM\"].isin(condidatestops)])\r\n######################################\r\n'''\r\n#busids = [326,327,386,387,369,397,399,412]\r\nbusids = [326,327,386,387]\r\n\r\nfor busid in busids:\r\n\r\n path = \"C:\\\\Users\\\\bdu\\\\Desktop\\\\gzy\\\\BusBunching\\\\BusData\\\\\"+str(busid)+\".csv\"\r\n #path = \"C:\\\\Users\\\\bdu\\\\Desktop\\\\gzy\\\\BusBunching\\\\BusData\\\\2016-02-01.csv\"\r\n opaldata = pd.read_csv(path)\r\n opaldata = opaldata[[\"ROUTE_ID\",\"BUS_ID\",\"TRIP_ID\",\"JS_STRT_DT_FK\",\"TAG1_TM\",\"TAG1_TS_NUM\",\"TAG2_TM\",\"TAG2_TS_NUM\"]]\r\n\r\n #drop the outliers\r\n opaldata = opaldata[~opaldata[\"TAG1_TS_NUM\"].isin([-1])]\r\n opaldata = opaldata[~opaldata[\"TAG2_TS_NUM\"].isin([-1])]\r\n\r\n\r\n grouped_byday = opaldata.groupby(\"JS_STRT_DT_FK\")\r\n\r\n for name_byday,group_byday in grouped_byday:\r\n #a json file per day\r\n dataperday_json = {}\r\n\r\n group_byday.sort_values(by=['TAG1_TM'],inplace=True)\r\n\r\n tripnum_perday = 0\r\n\r\n grouped_bytrip = group_byday.groupby(\"TRIP_ID\")\r\n for name_bytrip,group_bytrip in grouped_bytrip:\r\n taponNum_pertrip = {}\r\n tapoffNum_pertrip = {}\r\n\r\n dwellTime = {}\r\n\r\n #tap on\r\n grouped_bytapon = group_bytrip.groupby(\"TAG1_TS_NUM\")\r\n for station_idtapon,grouppertrip_bytapon in grouped_bytapon:\r\n taponNum_pertrip[station_idtapon] = grouppertrip_bytapon.shape[0]\r\n \r\n #tap off\r\n grouped_bytapoff = 
group_bytrip.groupby(\"TAG2_TS_NUM\")\r\n for station_idtapoff,grouppertrip_bytapoff in grouped_bytapoff:\r\n tapoffNum_pertrip[station_idtapoff] = grouppertrip_bytapoff.shape[0]\r\n\r\n #all stops in the trip \r\n trip_allstops = list(set(list(group_bytrip[\"TAG1_TS_NUM\"])+list(group_bytrip[\"TAG2_TS_NUM\"])))\r\n #all stops passengers tap on\r\n trip_tapon_stops = list(set(list(group_bytrip[\"TAG1_TS_NUM\"])))\r\n #all stops passengers tap off\r\n trip_tapoff_stops = list(set(list(group_bytrip[\"TAG2_TS_NUM\"])))\r\n\r\n\r\n for stop in trip_allstops:\r\n #passengers tapon and tapoff at this stop\r\n if (stop in trip_tapon_stops) and (stop in trip_tapoff_stops):\r\n #print(\"passengers tapon and tapoff at this stop\")\r\n #print(stop)\r\n #passengers_Atstop = group_bytrip[(group_bytrip['TAG1_TS_NUM'] == stop) | (group_bytrip['TAG2_TS_NUM']==stop)]\r\n #print(passengers_Atstop)\r\n #print()\r\n\r\n passengers_Atstop = group_bytrip[(group_bytrip['TAG1_TS_NUM'] == stop) | (group_bytrip['TAG2_TS_NUM']==stop)]\r\n # find the first tapoff passenger time\r\n df1 = passengers_Atstop[(passengers_Atstop[\"TAG2_TS_NUM\"] == stop)]\r\n time1 = list(df1[\"TAG1_TM\"])[0]\r\n # find the last tapon passenger time\r\n df2 = passengers_Atstop[(passengers_Atstop[\"TAG1_TS_NUM\"] == stop)]\r\n time2 = list(df1[\"TAG2_TM\"])[len(list(df1[\"TAG2_TM\"]))-1]\r\n\r\n dwellTime[stop] = time2 - time1\r\n\r\n #passengers only tapon at this stop\r\n elif stop in trip_tapon_stops:\r\n #print(stop)\r\n #print(\"passengers only tapon at this stop\")\r\n #passengers_Atstop = group_bytrip[(group_bytrip['TAG1_TS_NUM'] == stop)]\r\n #print(passengers_Atstop)\r\n #print()\r\n\r\n passengers_Atstop = group_bytrip[(group_bytrip['TAG1_TS_NUM'] == stop)]\r\n # if only one passenger return 0 else return (last tapon time)-(first tapon time)\r\n if passengers_Atstop.shape[0] <= 1:\r\n dwellTime[stop] = 0\r\n else:\r\n time1 = list(passengers_Atstop[\"TAG1_TM\"])[0]\r\n time2 = 
list(passengers_Atstop[\"TAG1_TM\"])[len(list(passengers_Atstop[\"TAG1_TM\"]))-1]\r\n dwellTime[stop] = time2 - time1\r\n\r\n\r\n ##passengers only tapoff at this stop\r\n else:\r\n #print(stop)\r\n #print(\"passengers only tapoff at this stop\")\r\n #passengers_Atstop = group_bytrip[(group_bytrip['TAG2_TS_NUM'] == stop)]\r\n #print(passengers_Atstop)\r\n #print()\r\n\r\n passengers_Atstop = group_bytrip[(group_bytrip['TAG2_TS_NUM'] == stop)]\r\n # if only one passenger return 0 else return (last tapoff time)-(first tapoff time)\r\n if passengers_Atstop.shape[0] <= 1:\r\n dwellTime[stop] = 0\r\n else:\r\n time1 = list(passengers_Atstop[\"TAG2_TM\"])[0]\r\n time2 = list(passengers_Atstop[\"TAG2_TM\"])[len(list(passengers_Atstop[\"TAG2_TM\"]))-1]\r\n dwellTime[stop] = time2 - time1\r\n\r\n datapertrip = {\"tapon\":taponNum_pertrip,\"tapoff\":tapoffNum_pertrip,\"dwellTime\":dwellTime}\r\n\r\n dataperday_json[tripnum_perday] = datapertrip\r\n\r\n\r\n tripnum_perday+=1\r\n\r\n \r\n dataperday_json = json.dumps(dataperday_json)\r\n print(dataperday_json)\r\n path_json = \"C:\\\\Users\\\\bdu\\\\Desktop\\\\gzy\\\\BusBunching\\\\BusData\\\\huhao\\\\\"+str(busid)+\"-\"+str(name_byday)+\".json\"\r\n fileObject = open(path_json, 'w')\r\n fileObject.write(dataperday_json)\r\n fileObject.close()\r\n\r\n\r\n print(\"----------------------------\")\r\n'''\r\n\r\n\r\n"
] |
[
[
"numpy.load",
"numpy.array",
"numpy.random.randint"
],
[
"pandas.read_csv"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [
"2.0",
"1.4",
"1.1",
"1.5",
"1.2",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
MaplewithMoon/CodedVision
|
[
"b98a06f5014fde30f316433b145dd8c8bd777b47"
] |
[
"PascalLoader.py"
] |
[
"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 18 11:58:07 2017\n\n@author: Biagio Brattoli\n\"\"\"\n\nimport os, numpy as np\nimport torch\nimport torch.utils.data as data\nfrom scipy.misc import imread, imresize\nfrom scipy.sparse import csr_matrix\nfrom PIL import Image\nimport xml.etree.ElementTree as ET\nimport torchvision.transforms as transforms\n\nclass DataLoader(data.Dataset):\n def __init__(self,data_path,trainval,transform,random_crops=0):\n self.data_path = data_path\n self.transform = transform\n self.random_crops = random_crops\n self.trainval = trainval\n \n self.__init_classes()\n self.names, self.labels = self.__dataset_info()\n \n def __getitem__(self, index):\n x = imread(self.data_path+'/JPEGImages/'+self.names[index]+'.jpg',mode='RGB')\n x = Image.fromarray(x)\n \n scale = np.random.rand()*2+0.25\n w = int(x.size[0]*scale)\n h = int(x.size[1]*scale)\n if min(w,h)<227:\n scale = 227/min(w,h)\n w = int(x.size[0]*scale)\n h = int(x.size[1]*scale)\n \n #x = x.resize((w,h), Image.BILINEAR) # Random scale\n \n if self.random_crops==0:\n x = self.transform(x)\n else:\n crops = []\n for i in range(self.random_crops):\n crops.append(self.transform(x))\n x = torch.stack(crops)\n \n y = self.labels[index]\n return x, y\n \n def __len__(self):\n return len(self.names)\n \n def __dataset_info(self):\n #annotation_files = os.listdir(self.data_path+'/Annotations')\n with open(self.data_path+'/ImageSets/Main/'+self.trainval+'.txt') as f:\n annotations = f.readlines()\n \n annotations = [n[:-1] for n in annotations]\n \n names = []\n labels = []\n for af in annotations:\n if len(af)!=11:\n continue\n\t #print(af)\n filename = os.path.join(self.data_path,'Annotations',af)\n tree = ET.parse(filename+'.xml')\n objs = tree.findall('object')\n num_objs = len(objs)\n \n boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n boxes_cl = np.zeros((num_objs), dtype=np.int32)\n \n for ix, obj in enumerate(objs):\n bbox = obj.find('bndbox')\n # Make pixel indexes 
0-based\n x1 = float(bbox.find('xmin').text) - 1\n y1 = float(bbox.find('ymin').text) - 1\n x2 = float(bbox.find('xmax').text) - 1\n y2 = float(bbox.find('ymax').text) - 1\n \n cls = self.class_to_ind[obj.find('name').text.lower().strip()]\n boxes[ix, :] = [x1, y1, x2, y2]\n boxes_cl[ix] = cls\n \n lbl = np.zeros(self.num_classes)\n lbl[boxes_cl] = 1\n labels.append(lbl)\n names.append(af)\n \n return np.array(names), np.array(labels).astype(np.float32)\n \n def __init_classes(self):\n self.classes = ('__background__','aeroplane', 'bicycle', 'bird', 'boat',\n 'bottle', 'bus', 'car', 'cat', 'chair',\n 'cow', 'diningtable', 'dog', 'horse',\n 'motorbike', 'person', 'pottedplant',\n 'sheep', 'sofa', 'train', 'tvmonitor')\n self.num_classes = len(self.classes)\n self.class_to_ind = dict(zip(self.classes, range(self.num_classes)))\n\n''' \nif __name__ == \"__main__\":\n normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\n train_transform = transforms.Compose([\n transforms.RandomResizedCrop(256),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n normalize,\n ])\n train_data = DataLoader('/home/maple/classification/pascal_test/data/VOC2012', 'trainval', transform=train_transform)\n'''\n"
] |
[
[
"numpy.random.rand",
"scipy.misc.imread",
"torch.stack",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"0.14",
"0.15",
"1.0",
"0.19",
"0.18",
"1.2",
"0.12",
"0.10",
"0.17",
"0.16"
],
"tensorflow": []
}
] |
yizhouzhao/GPointNet
|
[
"883585821bff89e29cbd22f70083966bc1854f2c"
] |
[
"new/aae/pcutil.py"
] |
[
"import matplotlib.pyplot as plt\nimport numpy as np\nfrom numpy.linalg import norm\n\n# Don't delete this line, even if PyCharm says it's an unused import.\n# It is required for projection='3d' in add_subplot()\n# from mpl_toolkits.mplot3d import Axes3D\n\n\ndef rand_rotation_matrix(deflection=1.0, seed=None):\n \"\"\"Creates a random rotation matrix.\n\n Args:\n deflection: the magnitude of the rotation. For 0, no rotation; for 1,\n completely random rotation. Small deflection => small\n perturbation.\n\n DOI: http://www.realtimerendering.com/resources/GraphicsGems/gemsiii/rand_rotation.c\n http://blog.lostinmyterminal.com/python/2015/05/12/random-rotation-matrix.html\n \"\"\"\n if seed is not None:\n np.random.seed(seed)\n\n theta, phi, z = np.random.uniform(size=(3,))\n\n theta = theta * 2.0 * deflection * np.pi # Rotation about the pole (Z).\n phi = phi * 2.0 * np.pi # For direction of pole deflection.\n z = z * 2.0 * deflection # For magnitude of pole deflection.\n\n # Compute a vector V used for distributing points over the sphere\n # via the reflection I - V Transpose(V). This formulation of V\n # will guarantee that if x[1] and x[2] are uniformly distributed,\n # the reflected points will be uniform on the sphere. 
Note that V\n # has length sqrt(2) to eliminate the 2 in the Householder matrix.\n\n r = np.sqrt(z)\n V = (np.sin(phi) * r,\n np.cos(phi) * r,\n np.sqrt(2.0 - z))\n\n st = np.sin(theta)\n ct = np.cos(theta)\n\n R = np.array(((ct, st, 0), (-st, ct, 0), (0, 0, 1)))\n\n # Construct the rotation matrix ( V Transpose(V) - I ) R.\n M = (np.outer(V, V) - np.eye(3)).dot(R)\n return M\n\n\ndef add_gaussian_noise_to_pcloud(pcloud, mu=0, sigma=1):\n gnoise = np.random.normal(mu, sigma, pcloud.shape[0])\n gnoise = np.tile(gnoise, (3, 1)).T\n pcloud += gnoise\n return pcloud\n\n\ndef add_rotation_to_pcloud(pcloud):\n r_rotation = rand_rotation_matrix()\n\n if len(pcloud.shape) == 2:\n return pcloud.dot(r_rotation)\n else:\n return np.asarray([e.dot(r_rotation) for e in pcloud])\n\n\ndef apply_augmentations(batch, conf):\n if conf.gauss_augment is not None or conf.z_rotate:\n batch = batch.copy()\n\n if conf.gauss_augment is not None:\n mu = conf.gauss_augment['mu']\n sigma = conf.gauss_augment['sigma']\n batch += np.random.normal(mu, sigma, batch.shape)\n\n if conf.z_rotate:\n r_rotation = rand_rotation_matrix()\n r_rotation[0, 2] = 0\n r_rotation[2, 0] = 0\n r_rotation[1, 2] = 0\n r_rotation[2, 1] = 0\n r_rotation[2, 2] = 1\n batch = batch.dot(r_rotation)\n return batch\n\n\ndef unit_cube_grid_point_cloud(resolution, clip_sphere=False):\n \"\"\"Returns the center coordinates of each cell of a 3D grid with\n resolution^3 cells, that is placed in the unit-cube.\n If clip_sphere it True it drops the \"corner\" cells that lie outside\n the unit-sphere.\n \"\"\"\n grid = np.ndarray((resolution, resolution, resolution, 3), np.float32)\n spacing = 1.0 / float(resolution - 1)\n for i in range(resolution):\n for j in range(resolution):\n for k in range(resolution):\n grid[i, j, k, 0] = i * spacing - 0.5\n grid[i, j, k, 1] = j * spacing - 0.5\n grid[i, j, k, 2] = k * spacing - 0.5\n\n if clip_sphere:\n grid = grid.reshape(-1, 3)\n grid = grid[norm(grid, axis=1) <= 0.5]\n\n return grid, 
spacing\n\n\ndef plot_3d_point_cloud(x, y, z, show=True, show_axis=True, in_u_sphere=False,\n marker='.', s=8, alpha=.8, figsize=(5, 5), elev=10,\n azim=240, axis=None, title=None, *args, **kwargs):\n plt.switch_backend('agg')\n if axis is None:\n fig = plt.figure(figsize=figsize)\n ax = fig.add_subplot(111, projection='3d')\n else:\n ax = axis\n fig = axis\n\n if title is not None:\n plt.title(title)\n\n sc = ax.scatter(x, y, z, marker=marker, s=s, alpha=alpha, *args, **kwargs)\n ax.view_init(elev=elev, azim=azim)\n\n if in_u_sphere:\n ax.set_xlim3d(-0.5, 0.5)\n ax.set_ylim3d(-0.5, 0.5)\n ax.set_zlim3d(-0.5, 0.5)\n else:\n # Multiply with 0.7 to squeeze free-space.\n miv = 0.7 * np.min([np.min(x), np.min(y), np.min(z)])\n mav = 0.7 * np.max([np.max(x), np.max(y), np.max(z)])\n ax.set_xlim(miv, mav)\n ax.set_ylim(miv, mav)\n ax.set_zlim(miv, mav)\n plt.tight_layout()\n\n if not show_axis:\n plt.axis('off')\n\n if 'c' in kwargs:\n plt.colorbar(sc)\n\n if show:\n plt.show()\n\n return fig\n\n\ndef transform_point_clouds(X, only_z_rotation=False, deflection=1.0):\n r_rotation = rand_rotation_matrix(deflection)\n if only_z_rotation:\n r_rotation[0, 2] = 0\n r_rotation[2, 0] = 0\n r_rotation[1, 2] = 0\n r_rotation[2, 1] = 0\n r_rotation[2, 2] = 1\n X = X.dot(r_rotation).astype(np.float32)\n return X\n"
] |
[
[
"numpy.sqrt",
"numpy.ndarray",
"numpy.max",
"matplotlib.pyplot.tight_layout",
"numpy.eye",
"numpy.sin",
"matplotlib.pyplot.axis",
"numpy.outer",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"numpy.min",
"matplotlib.pyplot.switch_backend",
"numpy.array",
"matplotlib.pyplot.show",
"numpy.random.seed",
"numpy.cos",
"numpy.tile",
"numpy.linalg.norm",
"matplotlib.pyplot.colorbar",
"numpy.random.normal",
"numpy.random.uniform"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Xirider/tdnc
|
[
"4e2b18dd3dd9e160fef42e89506aaf3b6e15fe12"
] |
[
"argmax_task.py"
] |
[
"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport warnings\nwarnings.filterwarnings('ignore')\n\nimport numpy as np\nimport getopt\nimport sys\nimport os\nimport math\nimport time\nimport argparse\nfrom visdom import Visdom\n\nsys.path.insert(0, os.path.join('..', '..'))\n\nimport torch as T\nfrom torch.autograd import Variable as var\nimport torch.nn.functional as F\nimport torch.optim as optim\n\nfrom torch.nn.utils import clip_grad_norm\n\nfrom dnc import DNC\n# from sdnc import SDNC\nfrom sam import SAM\nfrom util import *\n\nparser = argparse.ArgumentParser(description='PyTorch Differentiable Neural Computer')\nparser.add_argument('-input_size', type=int, default=6, help='dimension of input feature')\nparser.add_argument('-rnn_type', type=str, default='lstm', help='type of recurrent cells to use for the controller')\nparser.add_argument('-nhid', type=int, default=100, help='number of hidden units of the inner nn')\nparser.add_argument('-dropout', type=float, default=0, help='controller dropout')\nparser.add_argument('-memory_type', type=str, default='dnc', help='dense or sparse memory: dnc | sdnc | sam')\n\nparser.add_argument('-nlayer', type=int, default=1, help='number of layers')\nparser.add_argument('-nhlayer', type=int, default=2, help='number of hidden layers')\nparser.add_argument('-lr', type=float, default=1e-4, help='initial learning rate')\nparser.add_argument('-optim', type=str, default='adam', help='learning rule, supports adam|rmsprop')\nparser.add_argument('-clip', type=float, default=50, help='gradient clipping')\n\nparser.add_argument('-batch_size', type=int, default=100, metavar='N', help='batch size')\nparser.add_argument('-mem_size', type=int, default=20, help='memory dimension')\nparser.add_argument('-mem_slot', type=int, default=16, help='number of memory slots')\nparser.add_argument('-read_heads', type=int, default=4, help='number of read heads')\nparser.add_argument('-sparse_reads', type=int, default=10, help='number of sparse 
reads per read head')\nparser.add_argument('-temporal_reads', type=int, default=2, help='number of temporal reads')\n\nparser.add_argument('-sequence_max_length', type=int, default=4, metavar='N', help='sequence_max_length')\nparser.add_argument('-cuda', type=int, default=-1, help='Cuda GPU ID, -1 for CPU')\n\nparser.add_argument('-iterations', type=int, default=2000, metavar='N', help='total number of iteration')\nparser.add_argument('-summarize_freq', type=int, default=100, metavar='N', help='summarize frequency')\nparser.add_argument('-check_freq', type=int, default=100, metavar='N', help='check point frequency')\nparser.add_argument('-visdom', action='store_true', help='plot memory content on visdom per -summarize_freq steps')\n\nargs = parser.parse_args()\nprint(args)\n\nviz = Visdom()\n# assert viz.check_connection()\n\nif args.cuda != -1:\n print('Using CUDA.')\n T.manual_seed(1111)\nelse:\n print('Using CPU.')\n\ndef llprint(message):\n sys.stdout.write(message)\n sys.stdout.flush()\n\n\ndef onehot(x, n):\n ret = np.zeros(n).astype(np.float32)\n ret[x] = 1.0\n return ret\n\n\ndef generate_data(length, size):\n\n content = np.random.randint(0, size - 1, length)\n\n seqlen = length + 1\n x_seq_list = [float('nan')] * seqlen\n max_value = 0\n max_ind = 0\n for i in range(seqlen):\n if (i < length):\n x_seq_list[i] = onehot(content[i], size)\n if (max_value <= content[i]):\n max_value = content[i]\n max_ind = i\n else:\n x_seq_list[i] = onehot(size - 1, size)\n\n x_seq_list = np.array(x_seq_list)\n x_seq_list = x_seq_list.reshape((1,) + x_seq_list.shape)\n x_seq_list = np.reshape(x_seq_list, (1, -1, size))\n\n target_output = np.zeros((1, 1, seqlen), dtype=np.float32)\n target_output[:, -1, -1] = max_ind\n target_output = np.reshape(target_output, (1, -1, 1))\n\n weights_vec = np.zeros((1, 1, seqlen), dtype=np.float32)\n weights_vec[:, -1, -1] = 1.0\n weights_vec = np.reshape(weights_vec, (1, -1, 1))\n\n return cudavec(x_seq_list, gpu_id=args.cuda).float(), 
\\\n cudavec(target_output, gpu_id=args.cuda).float(), \\\n cudavec(weights_vec, gpu_id=args.cuda)\n\n\nif __name__ == '__main__':\n\n dirname = os.path.dirname(__file__)\n ckpts_dir = os.path.join(dirname, 'checkpoints')\n\n input_size = args.input_size\n memory_type = args.memory_type\n lr = args.lr\n clip = args.clip\n batch_size = args.batch_size\n sequence_max_length = args.sequence_max_length\n cuda = args.cuda\n iterations = args.iterations\n summarize_freq = args.summarize_freq\n check_freq = args.check_freq\n visdom = args.visdom\n print(visdom)\n\n from_checkpoint = None\n\n if args.memory_type == 'dnc':\n rnn = DNC(\n input_size=args.input_size,\n hidden_size=args.nhid,\n rnn_type=args.rnn_type,\n num_layers=args.nlayer,\n num_hidden_layers=args.nhlayer,\n dropout=args.dropout,\n nr_cells=args.mem_slot,\n cell_size=args.mem_size,\n read_heads=args.read_heads,\n gpu_id=args.cuda,\n debug=args.visdom,\n batch_first=True,\n independent_linears=False\n )\n elif args.memory_type == 'sdnc':\n rnn = SDNC(\n input_size=args.input_size,\n hidden_size=args.nhid,\n rnn_type=args.rnn_type,\n num_layers=args.nlayer,\n num_hidden_layers=args.nhlayer,\n dropout=args.dropout,\n nr_cells=args.mem_slot,\n cell_size=args.mem_size,\n sparse_reads=args.sparse_reads,\n temporal_reads=args.temporal_reads,\n read_heads=args.read_heads,\n gpu_id=args.cuda,\n debug=args.visdom,\n batch_first=True,\n independent_linears=False\n )\n elif args.memory_type == 'sam':\n rnn = SAM(\n input_size=args.input_size,\n hidden_size=args.nhid,\n rnn_type=args.rnn_type,\n num_layers=args.nlayer,\n num_hidden_layers=args.nhlayer,\n dropout=args.dropout,\n nr_cells=args.mem_slot,\n cell_size=args.mem_size,\n sparse_reads=args.sparse_reads,\n read_heads=args.read_heads,\n gpu_id=args.cuda,\n debug=args.visdom,\n batch_first=True,\n independent_linears=False\n )\n else:\n raise Exception('Not recognized type of memory')\n\n if args.cuda != -1:\n rnn = rnn.cuda(args.cuda)\n\n print(rnn)\n\n 
last_save_losses = []\n\n if args.optim == 'adam':\n optimizer = optim.Adam(rnn.parameters(), lr=args.lr, eps=1e-9, betas=[0.9, 0.98]) # 0.0001\n elif args.optim == 'adamax':\n optimizer = optim.Adamax(rnn.parameters(), lr=args.lr, eps=1e-9, betas=[0.9, 0.98]) # 0.0001\n elif args.optim == 'rmsprop':\n optimizer = optim.RMSprop(rnn.parameters(), lr=args.lr, momentum=0.9, eps=1e-10) # 0.0001\n elif args.optim == 'sgd':\n optimizer = optim.SGD(rnn.parameters(), lr=args.lr) # 0.01\n elif args.optim == 'adagrad':\n optimizer = optim.Adagrad(rnn.parameters(), lr=args.lr)\n elif args.optim == 'adadelta':\n optimizer = optim.Adadelta(rnn.parameters(), lr=args.lr)\n\n last_100_losses = []\n\n (chx, mhx, rv) = (None, None, None)\n for epoch in range(iterations + 1):\n llprint(\"\\rIteration {ep}/{tot}\".format(ep=epoch, tot=iterations))\n optimizer.zero_grad()\n\n # We use for training just (sequence_max_length / 10) examples\n random_length = np.random.randint(2, (sequence_max_length) + 1)\n input_data, target_output, loss_weights = generate_data(random_length, input_size)\n\n if rnn.debug:\n output, (chx, mhx, rv), v = rnn(input_data, (None, mhx, None), reset_experience=True, pass_through_memory=True)\n\n else:\n output, (chx, mhx, rv) = rnn(input_data, (None, mhx, None), reset_experience=True, pass_through_memory=True)\n\n loss = T.mean(((loss_weights * output).sum(-1, keepdim=True) - target_output) ** 2)\n\n loss.backward()\n\n T.nn.utils.clip_grad_norm(rnn.parameters(), args.clip)\n optimizer.step()\n loss_value = loss.item()\n\n # detach memory from graph\n mhx = { k : (v.detach() if isinstance(v, var) else v) for k, v in mhx.items() }\n\n summarize = (epoch % summarize_freq == 0)\n take_checkpoint = (epoch != 0) and (epoch % iterations == 0)\n\n last_100_losses.append(loss_value)\n\n try:\n if summarize:\n output = (loss_weights * output).sum().data.cpu().numpy().item()\n target_output = target_output.sum().data.cpu().numpy().item()\n\n\n\n if args.memory_type == 
'dnc':\n viz.heatmap(\n v['memory'],\n opts=dict(\n xtickstep=10,\n ytickstep=2,\n title='Memory, t: ' + str(epoch) + ', loss: ' + str(loss),\n ylabel='layer * time',\n xlabel='mem_slot * mem_size'\n )\n )\n\n if args.memory_type == 'dnc':\n viz.heatmap(\n v['link_matrix'][-1].reshape(args.mem_slot, args.mem_slot),\n opts=dict(\n xtickstep=10,\n ytickstep=2,\n title='Link Matrix, t: ' + str(epoch) + ', loss: ' + str(loss),\n ylabel='mem_slot',\n xlabel='mem_slot'\n )\n )\n elif args.memory_type == 'sdnc':\n viz.heatmap(\n v['link_matrix'][-1].reshape(args.mem_slot, -1),\n opts=dict(\n xtickstep=10,\n ytickstep=2,\n title='Link Matrix, t: ' + str(epoch) + ', loss: ' + str(loss),\n ylabel='mem_slot',\n xlabel='mem_slot'\n )\n )\n\n viz.heatmap(\n v['rev_link_matrix'][-1].reshape(args.mem_slot, -1),\n opts=dict(\n xtickstep=10,\n ytickstep=2,\n title='Reverse Link Matrix, t: ' + str(epoch) + ', loss: ' + str(loss),\n ylabel='mem_slot',\n xlabel='mem_slot'\n )\n )\n\n elif args.memory_type == 'sdnc' or args.memory_type == 'dnc':\n viz.heatmap(\n v['precedence'],\n opts=dict(\n xtickstep=10,\n ytickstep=2,\n title='Precedence, t: ' + str(epoch) + ', loss: ' + str(loss),\n ylabel='layer * time',\n xlabel='mem_slot'\n )\n )\n\n if args.memory_type == 'sdnc':\n viz.heatmap(\n v['read_positions'],\n opts=dict(\n xtickstep=10,\n ytickstep=2,\n title='Read Positions, t: ' + str(epoch) + ', loss: ' + str(loss),\n ylabel='layer * time',\n xlabel='mem_slot'\n )\n )\n\n viz.heatmap(\n v['read_weights'],\n opts=dict(\n xtickstep=10,\n ytickstep=2,\n title='Read Weights, t: ' + str(epoch) + ', loss: ' + str(loss),\n ylabel='layer * time',\n xlabel='nr_read_heads * mem_slot'\n )\n )\n\n viz.heatmap(\n v['write_weights'],\n opts=dict(\n xtickstep=10,\n ytickstep=2,\n title='Write Weights, t: ' + str(epoch) + ', loss: ' + str(loss),\n ylabel='layer * time',\n xlabel='mem_slot'\n )\n )\n\n viz.heatmap(\n v['usage_vector'] if args.memory_type == 'dnc' else v['usage'],\n opts=dict(\n 
xtickstep=10,\n ytickstep=2,\n title='Usage Vector, t: ' + str(epoch) + ', loss: ' + str(loss),\n ylabel='layer * time',\n xlabel='mem_slot'\n )\n )\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n \n\n llprint(\"\\rIteration %d/%d\" % (epoch, iterations))\n llprint(\"\\nAvg. Logistic Loss: %.4f\\n\" % (np.mean(last_100_losses)))\n print(target_output)\n print(\"Real value: \", ' = ' + str(int(target_output)))\n print(\"Predicted: \", ' = ' + str(int(output // 1)) + \" [\" + str(output) + \"]\")\n last_100_losses = []\n\n if take_checkpoint:\n llprint(\"\\nSaving Checkpoint ... \"),\n check_ptr = os.path.join(ckpts_dir, 'step_{}.pth'.format(epoch))\n cur_weights = rnn.state_dict()\n T.save(cur_weights, check_ptr)\n llprint(\"Done!\\n\")\n except Exception as e:\n pass\n\n llprint(\"\\nTesting generalization...\\n\")\n\n rnn.eval()\n\n for i in range(int((iterations + 1) / 10)):\n llprint(\"\\nIteration %d/%d\" % (i, iterations))\n # We test now the learned generalization using sequence_max_length examples\n random_length = np.random.randint(2, sequence_max_length * 2 + 1)\n input_data, target_output, loss_weights = generate_data(random_length, input_size)\n\n if rnn.debug:\n output, (chx, mhx, rv), v = rnn(input_data, (None, mhx, None), reset_experience=True, pass_through_memory=True)\n else:\n output, (chx, mhx, rv) = rnn(input_data, (None, mhx, None), reset_experience=True, pass_through_memory=True)\n output = output[:, -1, :].sum().data.cpu().numpy().item()\n target_output = target_output.sum().data.cpu().numpy().item()\n try:\n print(\"\\nReal value: \", ' = ' + str(int(target_output)))\n print(\"Predicted: \", ' = ' + str(int(output // 1)) + \" [\" + str(output) + \"]\")\n except Exception as e:\n pass\n"
] |
[
[
"numpy.reshape",
"torch.manual_seed",
"torch.save",
"numpy.mean",
"numpy.array",
"numpy.zeros",
"numpy.random.randint"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
nisheetsun/lost
|
[
"44678b8e3cfe8e640c05e868e2bc9e19d8b09252"
] |
[
"backend/lost/db/model.py"
] |
[
"__author__ = 'Jonas Jaeger, Gereon Reus'\nfrom flask_user import current_user, UserMixin\nfrom werkzeug.security import generate_password_hash, check_password_hash\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import Column, Integer, String, DateTime, Float, Text, Boolean\nfrom sqlalchemy.dialects.mysql import DATETIME\nfrom sqlalchemy import ForeignKey\nfrom sqlalchemy.schema import MetaData\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy import orm\nfrom lost.db import dtype\nimport json\nimport pandas as pd\n\n# Set conventions for foreign key name generation\nconvention = {\n \"ix\": 'ix_%(column_0_label)s',\n \"uq\": \"uq_%(table_name)s_%(column_0_name)s\",\n # \"ck\": \"ck_%(table_name)s_%(constraint_name)s\",\n \"fk\": \"fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s\",\n \"pk\": \"pk_%(table_name)s\"\n}\nmetadata = MetaData(naming_convention=convention)\nBase = declarative_base(metadata=metadata)\n\n# Define the User data-model.\n# NB: Make sure to add flask_user UserMixin !!!\n\n\nclass User(Base, UserMixin):\n __tablename__ = 'user'\n\n idx = Column(Integer, primary_key=True)\n is_active = Column('is_active', Boolean(),\n nullable=False, server_default='1')\n user_name = Column(String(100), nullable=False, unique=True)\n email = Column(String(255), unique=True)\n email_confirmed_at = Column(DateTime())\n password = Column(String(255), nullable=False, server_default='')\n\n # User information\n first_name = Column(String(100), server_default='')\n last_name = Column(String(100), server_default='')\n\n confidence_level = Column(Integer)\n photo_path = Column(String(4096))\n\n roles = relationship('Role', secondary='user_roles', lazy='joined')\n groups = relationship('Group', secondary='user_groups', lazy='joined')\n choosen_anno_task = relationship(\n 'AnnoTask', secondary='choosen_anno_task', lazy='joined', uselist=False)\n\n def __init__(self, user_name, password, email=None, first_name=None, 
last_name=None, email_confirmed_at=None):\n self.user_name = user_name\n self.email = email\n self.email_confirmed_at = email_confirmed_at\n self.set_password(password)\n self.first_name = first_name\n self.last_name = last_name\n\n def set_password(self, password):\n self.password = generate_password_hash(password)\n\n def check_password(self, password):\n return check_password_hash(self.password, password)\n\n def has_role(self, role):\n role_names = []\n for r in self.roles:\n role_names.append(r.name)\n if role in role_names:\n return True\n else:\n return False\n\n\n# Define the Role data-model\nclass Role(Base):\n __tablename__ = 'role'\n idx = Column(Integer(), primary_key=True)\n name = Column(String(50), unique=True)\n\n# Define the UserRoles association table\n\n\nclass UserRoles(Base):\n __tablename__ = 'user_roles'\n idx = Column(Integer(), primary_key=True)\n user_id = Column(Integer(), ForeignKey('user.idx', ondelete='CASCADE'))\n role_id = Column(Integer(), ForeignKey('role.idx', ondelete='CASCADE'))\n role = relationship('Role', uselist=False)\n\n\nclass Group(Base):\n __tablename__ = 'group'\n idx = Column(Integer(), primary_key=True)\n name = Column(String(50), unique=True)\n manager_id = Column(Integer(), ForeignKey('user.idx', ondelete='CASCADE'))\n is_user_default = Column(Boolean(), nullable=False, server_default='0')\n users = relationship(\"User\", secondary=\"user_groups\")\n\n\nclass UserGroups(Base):\n __tablename__ = 'user_groups'\n idx = Column(Integer(), primary_key=True)\n user_id = Column(Integer(), ForeignKey('user.idx', ondelete='CASCADE'))\n group_id = Column(Integer(), ForeignKey('group.idx', ondelete='CASCADE'))\n group = relationship('Group', uselist=False)\n\n\nclass TwoDAnno(Base):\n \"\"\"A TwoDAnno represents a 2D annotation/ drawing for an image.\n\n A TwoDAnno can be of type point, line, bbox or polygon.\n\n Attributes:\n idx (int): ID of this TwoDAnno in database\n anno_task_id (int): ID of the anno_task this TwoDAnno\n 
belongs to.\n timestamp (DateTime): Timestamp created of TwoDAnno\n timestamp_lock (DateTime): Timestamp locked in view\n state (enum): can be unlocked, locked, locked_priority or labeled\n (see :class:`lost.db.state.Anno`)\n track_id (int): The track id this TwoDAnno belongs to.\n sim_class (int): The similarity class this anno belong to.\n It is used to cluster similar annos in MIA.\n iteration (int): The iteration of a loop when this anno was created.\n user_id (int): Id of the annotator.\n img_anno_id (int) : ID of ImageAnno this TwoDAnno is appended to\n data (Text): drawing data (for e.g. x,y, width, height) of anno - depends on dtype\n dtype (int): type of TwoDAnno (for e.g. bbox, polygon)\n (see :class:`lost.db.dtype.TwoDAnno`)\n labels (list): A list of :class:`Label` objects related to the TwoDAnno.\n confidence (float): Confidence of Annotation.\n anno_time: Overall Annotation Time in ms.\n description (str): Description for this annotation. Assigned by an \n annotator or algorithm.\n \"\"\"\n __tablename__ = \"two_d_anno\"\n\n idx = Column(Integer, primary_key=True)\n anno_task_id = Column(Integer, ForeignKey('anno_task.idx'))\n timestamp = Column(DATETIME(fsp=6))\n timestamp_lock = Column(DATETIME(fsp=6))\n state = Column(Integer)\n track_id = Column(Integer, ForeignKey('track.idx'))\n data = Column(Text)\n dtype = Column(Integer)\n sim_class = Column(Integer)\n iteration = Column(Integer)\n user_id = Column(Integer, ForeignKey('user.idx'))\n img_anno_id = Column(Integer, ForeignKey('image_anno.idx'))\n labels = relationship('Label') # type: Label\n annotator = relationship('User', uselist=False) # type: User\n confidence = Column(Float)\n anno_time = Column(Float)\n description = Column(Text)\n\n def __init__(self, anno_task_id=None,\n user_id=None, timestamp=None, state=None,\n track_id=None, sim_class=None,\n img_anno_id=None, timestamp_lock=None,\n iteration=0, data=None, dtype=None,\n confidence=None, anno_time=None,\n description=None\n ):\n 
self.anno_task_id = anno_task_id\n self.user_id = user_id\n self.timestamp = timestamp\n self.timestamp_lock = timestamp_lock\n self.state = state\n self.track_id = track_id\n self.sim_class = sim_class\n self.img_anno_id = img_anno_id\n self.data = data\n self.dtype = dtype\n self.iteration = iteration\n self.confidence = confidence\n self.anno_time = anno_time\n self.description = description\n # if label_leaf_id is not None:\n # self.label = Label(label_leaf_id=label_leaf_id)\n\n def to_dict(self, style='flat'):\n '''Transform this object into a dict.\n\n Args:\n style (str): 'flat' or 'hierarchical'\n 'flat': Return a dictionray in table style\n 'hierarchical': Return a nested dictionary\n\n Retruns:\n dict: In flat or hierarchical style.\n\n Example:\n Get a dict in flat style. Note that 'anno.data',\n 'anno.lbl.idx', 'anno.lbl.name' and 'anno.lbl.external_id'\n are json strings in contrast to the *hierarchical* style.\n\n >>> bbox.to_dict(style='flat')\n {\n 'anno.idx': 88, \n 'anno.anno_task_id': None, \n 'anno.timestamp': None, \n 'anno.timestamp_lock': None, \n 'anno.state': None, \n 'anno.track_id': None, \n 'anno.dtype': 'bbox', \n 'anno.sim_class': None, \n 'anno.iteration': 0, \n 'anno.user_id': 47, \n 'anno.img_anno_id': None, \n 'anno.annotator': 'test', \n 'anno.confidence': None, \n 'anno.anno_time': None, \n 'anno.lbl.idx': '[\"14\"]', \n 'anno.lbl.name': '[\"Aeroplane\"]', \n 'anno.lbl.external_id': '[\"6\"]', \n 'anno.data': '{\"x\": 0.1, \"y\": 0.1, \"w\": 0.2, \"h\": 0.2}'\n }\n\n Get a dict in hierarchical style. 
Note that 'anno.data'\n is a dict in contrast to the *flat* style.\n\n >>> bbox.to_dict(style='hierarchical')\n {\n 'anno.idx': 86, \n 'anno.anno_task_id': None, \n 'anno.timestamp': None, \n 'anno.timestamp_lock': None, \n 'anno.state': None, \n 'anno.track_id': None, \n 'anno.dtype': 'bbox', \n 'anno.sim_class': None, \n 'anno.iteration': 0, \n 'anno.user_id': 46, \n 'anno.img_anno_id': None, \n 'anno.annotator': 'test', \n 'anno.confidence': None, \n 'anno.anno_time': None, \n 'anno.lbl.idx': [14], \n 'anno.lbl.name': ['Aeroplane'], \n 'anno.lbl.external_id': ['6'], \n 'anno.data': {\n 'x': 0.1, 'y': 0.1, 'w': 0.2, 'h': 0.2\n }\n }\n '''\n anno_dict = {\n 'anno.idx': self.idx,\n 'anno.anno_task_id': self.anno_task_id,\n 'anno.timestamp': self.timestamp,\n 'anno.timestamp_lock': self.timestamp_lock,\n 'anno.state': self.state,\n 'anno.track_id': self.track_id,\n 'anno.dtype': None,\n 'anno.sim_class': self.sim_class,\n 'anno.iteration': self.iteration,\n 'anno.user_id': self.user_id,\n 'anno.img_anno_id': self.img_anno_id,\n 'anno.annotator': None,\n 'anno.confidence': self.confidence,\n 'anno.anno_time': self.anno_time,\n 'anno.lbl.idx': None,\n 'anno.lbl.name': None,\n 'anno.lbl.external_id': None\n }\n try:\n anno_dict['anno.dtype'] = dtype.TwoDAnno.TYPE_TO_STR[self.dtype]\n except:\n pass\n try:\n anno_dict['anno.lbl.idx'] = [\n lbl.label_leaf.idx for lbl in self.labels]\n anno_dict['anno.lbl.name'] = [\n lbl.label_leaf.name for lbl in self.labels]\n anno_dict['anno.lbl.external_id'] = [\n lbl.label_leaf.external_id for lbl in self.labels]\n except:\n pass\n try:\n anno_dict['anno.annotator'] = self.annotator.first_name + \\\n ' ' + self.annotator.last_name\n except:\n pass\n\n if style == 'flat':\n anno_dict['anno.data'] = self.data\n anno_dict['anno.lbl.idx'] = json.dumps(anno_dict['anno.lbl.idx'])\n anno_dict['anno.lbl.name'] = json.dumps(anno_dict['anno.lbl.name'])\n anno_dict['anno.lbl.external_id'] = json.dumps(\n anno_dict['anno.lbl.external_id'])\n 
return anno_dict\n elif style == 'hierarchical':\n anno_dict['anno.data'] = json.loads(self.data)\n return anno_dict\n else:\n raise ValueError(\n 'Unknow style argument! Needs to be \"flat\" or \"hierarchical\".')\n\n def to_df(self):\n '''Transform this annotation into a pandas DataFrame\n\n Returns:\n pandas.DataFrame: \n A DataFrame where column names correspond\n to the keys of the dictionary returned from *to_dict()*\n method.\n\n Note:\n Column names are:\n ['anno.idx', 'anno.anno_task_id', 'anno.timestamp', \n 'anno.timestamp_lock', 'anno.state', 'anno.track_id', \n 'anno.dtype', 'anno.sim_class', \n 'anno.iteration', 'anno.user_id', 'anno.img_anno_id', \n 'anno.annotator', 'anno.confidence', 'anno.anno_time', \n 'anno.lbl.idx', 'anno.lbl.name', 'anno.lbl.external_id', \n 'anno.data']\n '''\n return pd.DataFrame(self.to_dict(), index=[0])\n\n def to_vec(self, columns='all'):\n '''Tansfrom this annotation in list style.\n\n Args:\n columns (list of str OR str): Possible column names are:\n 'all' OR\n ['anno.idx', 'anno.anno_task_id', 'anno.timestamp', \n 'anno.timestamp_lock', 'anno.state', 'anno.track_id', \n 'anno.dtype', 'anno.sim_class', \n 'anno.iteration', 'anno.user_id', 'anno.img_anno_id', \n 'anno.annotator', 'anno.confidence', 'anno.anno_time', \n 'anno.lbl.idx', 'anno.lbl.name', 'anno.lbl.external_id', \n 'anno.data']\n Returns:\n list of objects: A list of the desired columns.\n\n Example:\n If you want to get only the annotation in list style \n e.g. 
[x, y, w, h] (if this TwoDAnnotation is a bbox).\n\n >>> anno.to_vec('anno.data')\n [0.1, 0.1, 0.2, 0.2]\n\n If you want in addition also the corresponding *label names*\n and *label ids* for this annotation then just add additional\n column names:\n\n >>> bbox.to_vec(['anno.data', 'anno.lbl.idx', 'anno.lbl.name'])\n [[0.1, 0.1, 0.2, 0.2], \"[14]\", \"['Aeroplane']\"]\n '''\n df = self.to_df().drop(columns=['anno.data'])\n df_new = df.assign(data=[self.get_anno_vec()])\n df_new = df_new.rename(index=str, columns={'data': 'anno.data'})\n if columns == 'all':\n return df_new.values.tolist()[0]\n else:\n return df_new[columns].values.tolist()[0]\n\n def add_label(self, label_leaf_id):\n '''Add a label to this 2D annotation.\n\n Args:\n label_leaf_id (int): Id of the label_leaf that should be added.\n '''\n if label_leaf_id is not None:\n lbl = Label(label_leaf_id=label_leaf_id)\n self.labels.append(lbl)\n\n @property\n def point(self):\n '''list: POINT annotation in list style [x, y]\n\n Example:\n >>> anno = TwoDAnno()\n >>> anno.point = [0.1, 0.1]\n >>> anno.point\n [0.1, 0.1]\n '''\n if self.dtype == dtype.TwoDAnno.POINT:\n return self.get_anno_vec()\n else:\n raise Exception('''Can not use point property \n since this annotation is no point! \n It is a {}'''.format(dtype.TwoDAnno.TYPE_TO_STR[self.dtype].upper()))\n\n @point.setter\n def point(self, value):\n self.data = json.dumps(\n {\n 'x': value[0],\n 'y': value[1]\n }\n )\n self.dtype = dtype.TwoDAnno.POINT\n\n @property\n def bbox(self):\n '''list: BBOX annotation in list style [x, y, w, h]\n\n Example:\n >>> anno = TwoDAnno()\n >>> anno.bbox = [0.1, 0.1, 0.2, 0.2]\n >>> anno.bbox\n [0.1, 0.1, 0.2, 0.2]\n '''\n if self.dtype == dtype.TwoDAnno.BBOX:\n return self.get_anno_vec()\n else:\n raise Exception('''Can not use bbox property \n since this annotation is no BBOX! 
\n It is a {}'''.format(dtype.TwoDAnno.TYPE_TO_STR[self.dtype].upper()))\n\n @bbox.setter\n def bbox(self, value):\n self.data = json.dumps(\n {\n 'x': value[0],\n 'y': value[1],\n 'w': value[2],\n 'h': value[3]\n }\n )\n self.dtype = dtype.TwoDAnno.BBOX\n\n @property\n def line(self):\n '''list of list: LINE annotation in list style [[x, y], [x, y], ...]\n\n Example:\n >>> anno = TwoDAnno()\n >>> anno.line = [[0.1, 0.1], [0.2, 0.2]]\n >>> anno.line\n [[0.1, 0.1], [0.2, 0.2]]\n '''\n if self.dtype == dtype.TwoDAnno.LINE:\n return self.get_anno_vec()\n else:\n raise Exception('''Can not use line property \n since this annotation is no line! \n It is a {}'''.format(dtype.TwoDAnno.TYPE_TO_STR[self.dtype].upper()))\n\n @line.setter\n def line(self, value):\n val_list = [{'x': v[0], 'y':v[1]} for v in value]\n self.data = json.dumps(val_list)\n self.dtype = dtype.TwoDAnno.LINE\n\n @property\n def polygon(self):\n '''list of list: polygon annotation in list style [[x, y], [x, y], ...]\n\n Example:\n >>> anno = TwoDAnno()\n >>> anno.polygon = [[0.1, 0.1], [0.2, 0.1], [0.15, 0.2]]\n >>> anno.polygon\n [[0.1, 0.1], [0.2, 0.1], [0.15, 0.2]]\n '''\n if self.dtype == dtype.TwoDAnno.POLYGON:\n return self.get_anno_vec()\n else:\n raise Exception('''Can not use polygon property \n since this annotation is no polygon! \n It is a {}'''.format(dtype.TwoDAnno.TYPE_TO_STR[self.dtype].upper()))\n\n @polygon.setter\n def polygon(self, value):\n val_list = [{'x': v[0], 'y':v[1]} for v in value]\n self.data = json.dumps(val_list)\n self.dtype = dtype.TwoDAnno.POLYGON\n\n def get_anno_vec(self):\n '''Get annotation data in list style.\n\n Returns:\n list of floats:\n For a POINT:\n [x, y]\n\n For a BBOX:\n [x, y, w, h]\n\n For a LINE and POLYGONS:\n [[x, y], [x, y],...]\n\n Example:\n HowTo get a numpy array? 
In the following example a bounding box is returned::\n\n >>> np.array(twod_anno.get_anno_vec())\n array([0.1 , 0.2 , 0.3 , 0.18])\n '''\n\n data = json.loads(self.data)\n if self.dtype == dtype.TwoDAnno.BBOX:\n return [data['x'], data['y'], data['w'], data['h']]\n elif self.dtype == dtype.TwoDAnno.POINT:\n return [data['x'], data['y']]\n elif self.dtype == dtype.TwoDAnno.LINE:\n return [[e['x'], e['y']] for e in data]\n elif self.dtype == dtype.TwoDAnno.POLYGON:\n return [[e['x'], e['y']] for e in data]\n else:\n raise Exception('Unknown TwoDAnno type!')\n\n # def get_lbl_vec(self, which='id'):\n # '''Get labels for this annotations in list style.\n\n # A 2D annotation can contain multiple labels\n\n # Args:\n # which (str):\n\n # 'id':\n # An id in this list is related to :class:`LabelLeaf`\n # that is part of a LabelTree in the LOST framework.\n # A 2D annotation can contain multiple labels.\n\n # 'external_id':\n # An external label id can be any str\n # and is used to map LOST-LabelLeafs to label ids from\n # external systems like ImageNet.\n\n # 'name':\n # Get label names for this annotations in list style.\n\n # Retruns:\n # list of int or str [id, ...]:\n\n # Example:\n # Get vec of label ids\n\n # >>> twod_anno.get_lbl_vec()\n # [2]\n\n # Get related external ids\n\n # >>> twod_anno.get_lbl_vec('external_id')\n # [5]\n\n # Get related label name\n\n # >>> twod_anno.get_lbl_vec('name')\n # ['cow']\n # '''\n # if which == 'id':\n # return [lbl.label_leaf.idx for lbl in self.labels]\n # elif which == 'external_id':\n # return [lbl.label_leaf.external_id for lbl in self.labels]\n # elif which == 'name':\n # return [lbl.label_leaf.name for lbl in self.labels]\n # else:\n # raise Exception('Unknown argument value: {}'.format(which))\n\n # def get_anno_dict(self):\n # '''Get annotation data in dict style\n\n # Retruns:\n # dict:\n # For a POINT:\n # {\"x\": float, \"y\": float}\n\n # For a BBOX:\n # {\"x\": float, \"y\": float, \"w\": float, \"h\": float}\n\n # 
For a LINE and POLYGONS:\n # [{\"x\": float, \"y\": float}, {\"x\": float, \"y\": float},...]\n # '''\n # return json.loads(self.data)\n\n\nclass ImageAnno(Base):\n \"\"\"An ImageAnno represents an image annotation.\n\n Multiple labels as well as 2d annotations \n (e.g. points, lines, boxes, polygons) \n can be assigned to an image.\n\n Attributes:\n labels (list): The related :class:`Label` object.\n twod_annos (list): A list of :class:`TwoDAnno` objects.\n img_path (str): Path to the image where this anno belongs to.\n frame_n (int): If this image is part of an video,\n frame_n indicates the frame number.\n video_path (str): If this image is part of an video,\n this should be the path to that video in file system.\n sim_class (int): The similarity class this anno belong to.\n It is used to cluster similar annos in MIA\n anno_time: Overall annotation time in seconds.\n timestamp (DateTime): Timestamp of ImageAnno\n iteration (int): The iteration of a loop when this anno was created. \n idx (int): ID of this ImageAnno in database\n anno_task_id (int): ID of the anno_task this\n ImageAnno belongs to.\n state (enum): See :class:`lost.db.state.Anno`\n result_id: Id of the related result.\n user_id (int): Id of the annotator.\n is_junk (bool): This image was marked as Junk.\n description (str): Description for this annotation. 
Assigned by an \n annotator or algorithm.\n \"\"\"\n __tablename__ = \"image_anno\"\n\n idx = Column(Integer, primary_key=True)\n anno_task_id = Column(Integer, ForeignKey('anno_task.idx'))\n timestamp = Column(DATETIME(fsp=6))\n timestamp_lock = Column(DATETIME(fsp=6))\n state = Column(Integer)\n sim_class = Column(Integer)\n frame_n = Column(Integer)\n video_path = Column(String(4096))\n img_path = Column(String(4096))\n result_id = Column(Integer, ForeignKey('result.idx'))\n iteration = Column(Integer)\n user_id = Column(Integer, ForeignKey('user.idx'))\n labels = relationship('Label')\n twod_annos = relationship('TwoDAnno')\n annotator = relationship('User', uselist=False)\n anno_time = Column(Float)\n is_junk = Column(Boolean)\n description = Column(Text)\n\n def __init__(self, anno_task_id=None, user_id=None,\n timestamp=None, state=None,\n sim_class=None, result_id=None, img_path=None,\n frame_n=None,\n video_path=None,\n iteration=0, anno_time=None, is_junk=None,\n description=None):\n self.anno_task_id = anno_task_id\n self.user_id = user_id\n self.timestamp = timestamp\n self.state = state\n self.sim_class = sim_class\n self.result_id = result_id\n self.img_path = img_path\n self.video_path = video_path\n self.frame_n = frame_n\n self.iteration = iteration\n self.anno_time = anno_time\n self.is_junk = is_junk\n self.description = description\n # if label_leaf_id is not None:\n # self.label = Label(label_leaf_id=label_leaf_id)\n\n def to_dict(self, style='flat'):\n '''Transform this ImageAnno and all related TwoDAnnos into a dict.\n\n Args:\n style (str): 'flat' or 'hierarchical'. \n Return a dict in flat or nested style. \n\n Returns:\n list of dict OR dict:\n In 'flat' style return a list of dicts with one dict\n per annotation.\n In 'hierarchical' style, return a nested dictionary.\n Note: \n In 'flat' style annotation data and lists of labels are\n serialized as json strings. 
You may want to deserialize them\n with json.loads()\n\n Example:\n HowTo iterate through all TwoDAnnotations of this ImageAnno \n dictionary in *flat* style:\n\n >>> for d in img_anno.to_dict():\n ... print(d['img.img_path'], d['anno.lbl.name'], d['anno.dtype'])\n path/to/img1.jpg ['Aeroplane'] bbox\n path/to/img1.jpg ['Bicycle'] point\n\n Possible keys in *flat* style:\n\n >>> img_anno.to_dict()[0].keys()\n dict_keys([\n 'img.idx', 'img.anno_task_id', 'img.timestamp', \n 'img.timestamp_lock', 'img.state', 'img.sim_class', \n 'img.frame_n', 'img.video_path', 'img.img_path', \n 'img.result_id', 'img.iteration', 'img.user_id', \n 'img.anno_time', 'img.lbl.idx', 'img.lbl.name', \n 'img.lbl.external_id', 'img.annotator', 'img.is_junk'\n 'anno.idx', 'anno.anno_task_id', 'anno.timestamp', \n 'anno.timestamp_lock', 'anno.state', 'anno.track_n', \n 'anno.dtype', 'anno.sim_class', 'anno.iteration', \n 'anno.user_id', 'anno.img_anno_id', 'anno.annotator', \n 'anno.confidence', 'anno.anno_time', 'anno.lbl.idx', \n 'anno.lbl.name', 'anno.lbl.external_id', 'anno.data'\n ])\n\n HowTo iterate through all TwoDAnnotations of this ImageAnno \n dictionary in *hierarchical* style:\n\n >>> h_dict = img_anno.to_dict(style='hierarchical')\n >>> for d in h_dict['img.twod_annos']:\n ... 
print(h_dict['img.img_path'], d['anno.lbl.name'], d['anno.dtype'])\n path/to/img1.jpg [Aeroplane] bbox\n path/to/img1.jpg [Bicycle] point\n\n Possible keys in *hierarchical* style:\n\n >>> h_dict = img_anno.to_dict(style='hierarchical')\n >>> h_dict.keys()\n dict_keys([\n 'img.idx', 'img.anno_task_id', 'img.timestamp', \n 'img.timestamp_lock', 'img.state', 'img.sim_class', \n 'img.frame_n', 'img.video_path', 'img.img_path', \n 'img.result_id', 'img.iteration', 'img.user_id', \n 'img.anno_time', 'img.lbl.idx', 'img.lbl.name', \n 'img.lbl.external_id', 'img.annotator', 'img.twod_annos'\n ])\n >>> h_dict['img.twod_annos'][0].keys()\n dict_keys([\n 'anno.idx', 'anno.anno_task_id', 'anno.timestamp', \n 'anno.timestamp_lock', 'anno.state', 'anno.track_n', \n 'anno.dtype', 'anno.sim_class', 'anno.iteration', \n 'anno.user_id', 'anno.img_anno_id', 'anno.annotator', \n 'anno.confidence', 'anno.anno_time', 'anno.lbl.idx', \n 'anno.lbl.name', 'anno.lbl.external_id', 'anno.data'\n ])\n '''\n\n img_dict = {\n 'img.idx': self.idx,\n 'img.anno_task_id': self.anno_task_id,\n 'img.timestamp': self.timestamp,\n 'img.timestamp_lock': self.timestamp_lock,\n 'img.state': self.state,\n 'img.sim_class': self.sim_class,\n 'img.frame_n': self.frame_n,\n 'img.video_path': self.video_path,\n 'img.img_path': self.img_path,\n 'img.result_id': self.result_id,\n 'img.iteration': self.iteration,\n 'img.user_id': self.user_id,\n 'img.anno_time': self.anno_time,\n 'img.lbl.idx': None,\n 'img.lbl.name': None,\n 'img.lbl.external_id': None,\n 'img.annotator': None,\n 'img.is_junk': self.is_junk\n }\n try:\n img_dict['img.lbl.idx'] = [\n lbl.label_leaf.idx for lbl in self.labels]\n img_dict['img.lbl.name'] = [\n lbl.label_leaf.name for lbl in self.labels]\n img_dict['img.lbl.external_id'] = [\n lbl.label_leaf.external_id for lbl in self.labels]\n except:\n pass\n try:\n img_dict['img.annotator'] = self.annotator.first_name + \\\n ' ' + self.annotator.last_name\n except:\n pass\n if style == 
'hierarchical':\n img_dict['img.twod_annos'] = []\n for anno in self.twod_annos:\n img_dict['img.twod_annos'].append(\n anno.to_dict(style='hierarchical')\n )\n return img_dict\n elif style == 'flat':\n img_dict['img.lbl.idx'] = json.dumps(img_dict['img.lbl.idx'])\n img_dict['img.lbl.name'] = json.dumps(img_dict['img.lbl.name'])\n img_dict['img.lbl.external_id'] = json.dumps(\n img_dict['img.lbl.external_id'])\n d_list = []\n if len(self.twod_annos) > 0:\n for anno in self.twod_annos:\n d_list.append(\n dict(img_dict, **anno.to_dict())\n )\n return d_list\n else:\n empty_anno = TwoDAnno().to_dict()\n return [dict(img_dict, **empty_anno)]\n else:\n raise ValueError(\n 'Unknow style argument! Needs to be \"flat\" or \"hierarchical\".')\n\n def to_df(self):\n '''Tranform this ImageAnnotation and all related TwoDAnnotaitons into a pandas DataFrame.\n\n Returns:\n pandas.DataFrame: Column names are:\n 'img.idx', 'img.anno_task_id', 'img.timestamp', \n 'img.timestamp_lock', 'img.state', 'img.sim_class', \n 'img.frame_n', 'img.video_path', 'img.img_path', \n 'img.result_id', 'img.iteration', 'img.user_id', \n 'img.anno_time', 'img.lbl.idx', 'img.lbl.name', \n 'img.lbl.external_id', 'img.annotator', 'img.is_junk',\n 'anno.idx', 'anno.anno_task_id', 'anno.timestamp', \n 'anno.timestamp_lock', 'anno.state', 'anno.track_n', \n 'anno.dtype', 'anno.sim_class', 'anno.iteration', \n 'anno.user_id', 'anno.img_anno_id', 'anno.annotator', \n 'anno.confidence', 'anno.anno_time', 'anno.lbl.idx', \n 'anno.lbl.name', 'anno.lbl.external_id', 'anno.data'\n '''\n return pd.DataFrame(self.to_dict())\n\n def to_vec(self, columns='all'):\n '''Transform this ImageAnnotation and all related TwoDAnnotations in list style.\n\n Args:\n columns (str or list of str): 'all' OR \n 'img.idx', 'img.anno_task_id', 'img.timestamp', \n 'img.timestamp_lock', 'img.state', 'img.sim_class', \n 'img.frame_n', 'img.video_path', 'img.img_path', \n 'img.result_id', 'img.iteration', 'img.user_id', \n 
'img.anno_time', 'img.lbl.idx', 'img.lbl.name', \n 'img.lbl.external_id', 'img.annotator', 'img.is_junk',\n 'anno.idx', 'anno.anno_task_id', 'anno.timestamp', \n 'anno.timestamp_lock', 'anno.state', 'anno.track_n', \n 'anno.dtype', 'anno.sim_class', 'anno.iteration', \n 'anno.user_id', 'anno.img_anno_id', 'anno.annotator', \n 'anno.confidence', 'anno.anno_time', 'anno.lbl.idx', \n 'anno.lbl.name', 'anno.lbl.external_id', 'anno.data'\n\n Retruns:\n list OR list of lists: Desired columns\n\n Example:\n Return just a list of serialized 2d anno labels:\n\n >>> img_anno.to_vec('anno.lbl.name')\n [\"['Aeroplane']\", \"['Bicycle']\"]\n\n Return a list of lists:\n\n >>> img_anno.to_vec(['img.img_path', 'anno.lbl.name', \n ... 'anno.lbl.idx', 'anno.dtype'])\n [\n ['path/to/img1.jpg', \"['Aeroplane']\", 14, 'bbox'], \n ['path/to/img1.jpg', \"['Bicycle']\", 15, 'point']\n ]\n '''\n anno_vec = [vec.get_anno_vec() for vec in self.twod_annos]\n df = self.to_df()\n if anno_vec:\n df.update(pd.DataFrame({'anno.data': anno_vec}))\n if columns == 'all':\n return df.values.tolist()\n else:\n ret = df[columns].values.tolist()\n if ret == [None]:\n ret = []\n return ret\n\n def iter_annos(self, anno_type='bbox'):\n '''Iterator for all related 2D annotations of this image.\n\n Args:\n anno_type (str): Can be bbox', 'point', 'line', 'polygon', 'all'\n\n Retruns:\n iterator of :class:`TwoDAnno` objects\n\n Example:\n >>> for bb in img_anno.iter_annos('bbox'):\n ... 
do_something(bb)\n '''\n if anno_type == 'all':\n for anno in self.twod_annos:\n yield anno\n else:\n for anno in self.twod_annos:\n if anno.dtype == dtype.TwoDAnno.STR_TO_TYPE[anno_type]:\n yield anno\n\n def get_anno_vec(self, anno_type='bbox'):\n '''Get related 2d annotations in list style.\n\n Args:\n anno_type (str): Can be 'bbox', 'point', 'line', 'polygon'\n\n Returns:\n list of list of floats:\n For POINTs:\n [[x, y], [x, y], ...]\n For BBOXs:\n [[x, y, w, h], [x, y, w, h], ...]\n For LINEs and POLYGONs:\n [[[x, y], [x, y],...], [[x, y], [x, y],...]]\n\n Example:\n In the following example all bounding boxes \n of the image annotation will be returned in list style::\n\n >>> img_anno.anno_vec()\n [[0.1 , 0.2 , 0.3 , 0.18],\n [0.25, 0.25, 0.2, 0.4]]\n >>> img_anno.get_anno_lbl_vec('name', 'bbox') #Get related label names\n [['cow'], ['horse']]\n '''\n res = []\n for anno in self.twod_annos:\n if anno.dtype == dtype.TwoDAnno.STR_TO_TYPE[anno_type]:\n res.append(anno.get_anno_vec())\n return res\n\n\nclass AnnoTask(Base):\n \"\"\"A object that represents a anno task.\n\n Attributes:\n idx (int): ID of this AnnoTask in database.\n manager_id (int): ID of the Manager who had distributed this Task\n group_id (int): ID of the assigned Group (None means: All groups are\n assigned to this task !)\n state (enum): See :class:`data_model.state.AnnoTask`\n progress (float): The Progress of the Task\n dtype (enum): See :class:`data_model.dtype.AnnoTask`\n pipe_element_id (int): ID of related pipeline element.\n timestamp (DateTime): Date and time when this anno task was created.\n instructions (str): Instructions for the annotator of this AnnoTask.\n name (str): A name for this annotask.\n configuration (str): Configuration of this annotask.\n\n Warning:\n *annotator_id = None* means that all users are assigned to this task.\n \"\"\"\n __tablename__ = \"anno_task\"\n idx = Column(Integer, primary_key=True)\n manager_id = Column(Integer, ForeignKey('user.idx'))\n manager 
= relationship(\"User\", foreign_keys='AnnoTask.manager_id',\n uselist=False)\n group_id = Column(Integer, ForeignKey('group.idx'))\n group = relationship(\"Group\", foreign_keys='AnnoTask.group_id',\n uselist=False)\n state = Column(Integer)\n progress = Column(Float)\n name = Column(String(100))\n dtype = Column(Integer)\n pipe_element_id = Column(Integer, ForeignKey('pipe_element.idx'))\n timestamp = Column(DATETIME(fsp=6))\n instructions = Column(Text)\n configuration = Column(Text)\n last_activity = Column(DATETIME(fsp=6))\n last_annotator_id = Column(Integer, ForeignKey('user.idx'))\n last_annotator = relationship(\n \"User\", foreign_keys='AnnoTask.last_annotator_id', uselist=False)\n req_label_leaves = relationship('RequiredLabelLeaf')\n pipe_element = relationship(\n \"PipeElement\", foreign_keys='AnnoTask.pipe_element_id', uselist=False)\n\n def __init__(self, idx=None, manager_id=None, group_id=None, state=None,\n progress=None, dtype=None, pipe_element_id=None,\n timestamp=None, name=None, instructions=None,\n configuration=None, last_activity=None, last_annotator=None):\n self.idx = idx\n self.manager_id = manager_id\n self.group_id = group_id\n self.state = state\n self.progress = progress\n self.dtype = dtype\n self.pipe_element_id = pipe_element_id\n self.timestamp = timestamp\n self.name = name\n self.instructions = instructions\n self.configuration = configuration\n self.last_activity = last_activity\n self.last_annotator = last_annotator\n\n\nclass Pipe(Base):\n \"\"\"A general pipe (task) that defines how a video/dataset (Media) will be processed.\n\n Attributes:\n idx (int): Id of Pipe in database.\n name (str): Pipe Name\n manager_id : If of user who started this pipe\n state (enum): Status of this pipe. 
See :class:`data_model.state.Pipe`\n pipe_template_id (int): Id of related PipeTemplate\n timestamp (DateTime): Date and time when this task was created\n timestamp_finished (DateTime): Date and time when this task was finished\n description (str): A general description for this task.\n is_debug_mode (Boolean): DebugMode only visible for Developers\n group_id (int): Group which created this pipe\n is_locked (Boolean): Pipe Locked by PipeEngine\n pipe_template (PipeTemplate): Related :class:`PipeTemplate` object\n logfile_path (Text): path to logfile\n \"\"\"\n __tablename__ = \"pipe\"\n idx = Column(Integer, primary_key=True)\n name = Column(String(100))\n manager_id = Column(Integer, ForeignKey('user.idx'))\n state = Column(Integer)\n pipe_template_id = Column(Integer, ForeignKey('pipe_template.idx'))\n timestamp = Column(DATETIME(fsp=6))\n timestamp_finished = Column(DATETIME(fsp=6))\n description = Column(Text)\n is_debug_mode = Column(Boolean)\n is_locked = Column(Boolean)\n group_id = Column(Integer, ForeignKey('group.idx'))\n group = relationship(\"Group\", uselist=False)\n manager = relationship(\"User\", uselist=False)\n start_definition = Column(Text)\n pe_list = relationship(\"PipeElement\")\n pipe_template = relationship(\"PipeTemplate\", uselist=False)\n logfile_path = Column(String(4096))\n\n def __init__(self, idx=None, name=None, manager_id=None, state=None,\n pipe_template_id=None, timestamp=None,\n timestamp_finished=None, description=None,\n is_locked=None, group_id=None, is_debug_mode=None, start_definition=None, logfile_path=None):\n self.idx = idx\n self.name = name\n self.manager_id = manager_id\n self.state = state\n self.pipe_template_id = pipe_template_id\n self.timestamp = timestamp\n self.timestamp_finished = timestamp_finished\n self.description = description\n self.is_locked = is_locked\n self.group_id = group_id\n self.is_debug_mode = is_debug_mode\n self.start_definition = start_definition\n self.logfile_path = logfile_path\n\n\nclass 
PipeTemplate(Base):\n \"\"\"A template of an pipeline that need to be copyed by Pipe.\n\n A PipeTemplate Object contains a sequence of PipeElement\n objects. This sequence will be instantiated when a Pipe is created that uses\n this PipeTemplate. Each Pipe will then work on his own sequence of\n PipeElements.\n\n Attributes:\n idx (int): ID in database.\n json_template (Text): A json sting that defines a pipeline template.\n timestamp (DateTime): Date and Time this Template was created or imported.\n is_debug_mode (Boolean): DebugMode shows weather this pipe is viewable for normal users or only for developers\n \"\"\"\n __tablename__ = \"pipe_template\"\n idx = Column(Integer, primary_key=True)\n json_template = Column(Text)\n timestamp = Column(DATETIME(fsp=6))\n is_debug_mode = Column(Boolean)\n\n def __init__(self, idx=None, json_template=None, timestamp=None,\n is_debug_mode=None):\n self.idx = idx\n self.json_template = json_template\n self.timestamp = timestamp\n self.debug_mode = is_debug_mode\n\n\nclass Script(Base):\n \"\"\"A script that can be executed in a pipeline.\n\n Attributes:\n idx (int): ID in database.\n name (str): Name of the algorithm used in this script.\n path (str): Path to a script that will execute a algorithm\n on data in database.\n description (str): Description of this algorithm/ script.\n arguments (str): json object with key value pairs (arguments for script)\n envs (str): json object containing the names of environments that\n may execute this script\n resources (str): Json that defines the resources required by this script\n \"\"\"\n __tablename__ = \"script\"\n idx = Column(Integer, primary_key=True)\n name = Column(String(100))\n path = Column(String(4096))\n description = Column(Text)\n arguments = Column(Text)\n envs = Column(Text)\n resources = Column(Text)\n\n def __init__(self, idx=None, name=None, path=None, description=None,\n arguments=None, envs=None, resources=None):\n self.idx = idx\n self.name = name\n self.path = 
path\n self.description = description\n self.arguments = arguments\n self.envs = envs\n self.resources = resources\n\n\nclass ChoosenAnnoTask(Base):\n \"\"\"Linking Table which connects Anno Tasks to Groups\n\n Attributes:\n idx (int): ID in database.\n user_id (int): ID of user who has choosen that anno task\n anno_task_id (int): ID of the anno task which is connected to the user\n \"\"\"\n __tablename__ = \"choosen_anno_task\"\n idx = Column(Integer, primary_key=True)\n user_id = Column(Integer, ForeignKey('user.idx'), unique=True)\n anno_task_id = Column(Integer, ForeignKey('anno_task.idx'))\n anno_task = relationship(\"AnnoTask\")\n\n def __init__(self, idx=None, user_id=None, anno_task_id=None):\n self.idx = idx\n self.user_id = user_id\n self.anno_task_id = anno_task_id\n\n\nclass PipeElement(Base):\n \"\"\"One element in a workflow pipeline.\n\n A pipeline element can be an algorithm or an anno_task.\n\n Attributes:\n idx (int): ID in database.\n state (enum): Status of this pipeline element. See\n :class:`data_model.state.PipeElement`\n script_id (int): ID of related script.\n pipe_id (int): ID of related Pipe.\n \"None\" if this PipeElement belongs to a PipelineTemplate.\n dtype (enum): Type\n (see :class:`data_model.dtype.PipeElement`)\n error_msg (str): Exception message. When script had an error.\n error_reported (bool): Weather an error has been reported or not.\n debug_session (str): ssh connection string to temporary debug session.\n is_debug_mode (Boolean): DebugMode only visible for Developers.\n instance_context (str): A path where files of instantiated PipeElement can\n be stored.\n pe_outs (list): List of linked :class:`PipeElement` objects that are\n connected to this PipeElement.\n result_in (list): List of related input :class:`Result` objects.\n result_out (list): List of related output :class:`Result` objects.\n anno_task (object): Related :class:`AnnoTask`.\n script (object): Related :class:`Script`.\n iteration (int): Current iteration. 
Represents the number of times this\n PipeElement has been processed.\n pipe_context (str): A path to store files that can be used by all elements\n in a pipeline.\n progress (float): Progress of PipeElement (e.g. progress of the script its running)\n arguments: In case of dtype is script - instance arguments for script\n loop (object): Related :class:`Loop`\n datasource (object): Realted :class:`Datasource`\n \"\"\"\n __tablename__ = \"pipe_element\"\n idx = Column(Integer, primary_key=True)\n state = Column(Integer)\n script_id = Column(Integer, ForeignKey('script.idx'))\n pipe_id = Column(Integer, ForeignKey('pipe.idx'))\n dtype = Column(Integer)\n error_msg = Column(Text)\n error_reported = Column(Boolean)\n warning_msg = Column(String(4096))\n log_msg = Column(String(4096))\n debug_session = Column(String(4096))\n is_debug_mode = Column(Boolean)\n instance_context = Column(String(4096))\n pe_outs = relationship(\"PipeElement\", secondary=\"result_link\",\n primaryjoin=\"PipeElement.idx==result_link.c.pe_n\",\n secondaryjoin=\"PipeElement.idx==result_link.c.pe_out\")\n result_in = relationship(\"Result\", secondary=\"result_link\",\n primaryjoin=\"PipeElement.idx==result_link.c.pe_out\",\n secondaryjoin=\"Result.idx==result_link.c.result_id\")\n result_out = relationship(\"Result\", secondary=\"result_link\",\n primaryjoin=\"PipeElement.idx==result_link.c.pe_n\",\n secondaryjoin=\"Result.idx==result_link.c.result_id\")\n anno_task = relationship(\"AnnoTask\", uselist=False)\n script = relationship(\"Script\", uselist=False)\n iteration = Column(Integer)\n pipe_context = Column(String(4096))\n progress = Column(Float)\n arguments = Column(Text)\n loop = relationship(\n \"Loop\", foreign_keys='Loop.pipe_element_id', uselist=False)\n pipe = relationship(\"Pipe\", uselist=False)\n datasource = relationship('Datasource', uselist=False)\n\n def __init__(self, idx=None, state=None, dtype=None,\n anno_task=None, pipe_id=None, is_debug_mode=None,\n error_msg=None, 
error_reported=False, warning_msg=None,\n log_msg=None, instance_context=None, iteration=0,\n pipe_context=None, progress=None, arguments=None):\n self.idx = idx\n self.state = state\n self.dtype = dtype\n self.anno_task = anno_task\n self.pipe_id = pipe_id\n self.is_debug_mode = is_debug_mode\n self.error_msg = error_msg\n self.error_reported = error_reported\n self.warning_msg = warning_msg\n self.log_msg = log_msg\n self.instance_context = instance_context\n self.iteration = iteration\n self.pipe_context = pipe_context\n self.progress = progress\n self.arguments = arguments\n\n\nclass Result(Base):\n \"\"\"The Result of an Algorithm or AnnoTask\n\n Attributes:\n idx (int): ID in database.\n timestamp (DateTime): Date and time when this result was created.\n img_annos (list): A list of related :class:`ImageAnno` objects.\n visual_outputs (list): A list of related :class:`VisualOutput` objects.\n \"\"\"\n __tablename__ = \"result\"\n idx = Column(Integer, primary_key=True)\n timestamp = Column(DATETIME(fsp=6))\n img_annos = relationship(\"ImageAnno\")\n visual_outputs = relationship(\"VisualOutput\")\n data_exports = relationship(\"VisualOutput\")\n\n def __init__(self, timestamp=None, media_id=None):\n self.timestamp = timestamp\n self.media_id = media_id\n\n def add_img_anno(self, img_anno):\n '''Add a :class:`ImageAnno` to this result.\n '''\n self.img_annos.append(img_anno)\n\n def add_visual_output(self, visual_output):\n '''Add a :class:`VisualOutput` to this result.\n '''\n self.visual_outputs.append(visual_output)\n\n def iter_img_annos(self):\n '''Iterate over all :class:`ImageAnno` objects in this Result.\n\n Returns:\n Iterator: :class:`ImageAnno` objects.\n '''\n return iter(self.img_annos)\n\n def iter_bbox_annos(self):\n '''Iterate over all :class:`TwoDAnno` objects in this Result.\n\n Returns:\n Iterator: :class:`TwoDAnno` objects.\n '''\n for img_anno in self.img_annos:\n for bb_anno in img_anno.bbox_annos:\n yield bb_anno\n\n def 
iter_visual_outputs(self):\n '''Iterate over all :class:`VisualOutput` objects in this Result.\n\n Returns:\n Iterator: :class:`VisualOutput`.\n '''\n return iter(self.visual_outputs)\n\n\nclass Datasource(Base):\n '''Datasource\n\n Attributes:\n idx (int): Id in databse.\n raw_file_id (int): Link to RawFile.\n dtype (enum): see :class:`data_model.dtype.Datasource`\n pipe_element_id: The PipeElement this Datasource belongs to.\n '''\n __tablename__ = \"datasource\"\n idx = Column(Integer, primary_key=True)\n raw_file_path = Column(String(4096))\n dtype = Column(Integer)\n pipe_element_id = Column(Integer, ForeignKey('pipe_element.idx'))\n\n def __init__(self, media_id=None, dtype=None,\n pipe_element_id=None):\n self.media_id = media_id\n self.dtype = dtype\n self.pipe_element_id = pipe_element_id\n\n\nclass VisualOutput(Base):\n '''A VisualOutput will be used by a visulaise PipeElement to display\n statistics about data.\n\n Attributes:\n idx (int): db id.\n img_path (str): Path to an image that contains some useful informations. 
For\n example a diagram.\n html_string (str): HTML that should be presented by a visualise Element.\n result_id (int): Link to related result.\n iteration (int): Loop iteration when this output was created.\n '''\n __tablename__ = \"visual_output\"\n idx = Column(Integer, primary_key=True)\n img_path = Column(String(4096))\n html_string = Column(Text)\n result_id = Column(Integer, ForeignKey('result.idx'))\n iteration = Column(Integer)\n\n def __init__(self, img_path=None, html_string=None, result_id=None, iteration=0):\n self.img_path = img_path\n self.html_string = html_string\n self.result_id = result_id\n self.iteration = iteration\n\n\nclass ResultLink(Base):\n '''Links :class:`Result` objects to :class:`PipelineElement` objects\n\n Attributes:\n idx (int): db id.\n result_id (int): Id of the realated :class:`Result`.\n pe_n (int): Id of the :class:`PipelineElement` that has pe_out as output.\n pe_out (int): Id of the :class:`PipelineElement` that uses :class:`Result`\n as output.\n '''\n __tablename__ = \"result_link\"\n idx = Column(Integer, primary_key=True)\n result_id = Column(Integer, ForeignKey('result.idx'))\n pe_n = Column(Integer, ForeignKey('pipe_element.idx'))\n pe_out = Column(Integer, ForeignKey('pipe_element.idx'))\n result = relationship(\"Result\", uselist=False)\n\n def __init__(self, pe_n=None, pe_out=None, result_id=None):\n self.pe_n = pe_n\n self.pe_out = pe_out\n self.result_id = result_id\n\n\nclass DataExport(Base):\n '''A DatatExport represents an arbitrary file that is the result of a pipeline.\n\n Attributes:\n idx (str): ID in database.\n file_path (str): Path to the result file.\n result_id (int): ID of the releated :class:`Result`.\n iteration (int): Loop iteration when this DataExport was created.\n '''\n __tablename__ = \"data_export\"\n idx = Column(Integer, primary_key=True)\n file_path = Column(String(4096))\n result_id = Column(Integer, ForeignKey('result.idx'))\n iteration = Column(Integer)\n\n def __init__(self, 
file_path=None, result_id=None, iteration=0):\n self.file_path = file_path\n self.result_id = result_id\n self.iteration = iteration\n\n\nclass Loop(Base):\n '''Defines a Loop element in a pipeline.\n\n Attributes:\n idx (int): ID in database.\n max_iteration (int): Number of iteration when loop will break.\n iteration (int): Current iteration of the loop.\n pe_jump_id (int): ID of the :class:`PipeElement` where this loop should jump to.\n break_loop (bool): Indicates wether a script wants to break this loop.\n pe_jump (model.PipeElement): Related PipeElement object.\n pipe_element_id (int): The PipeElement this Loop belongs to.\n '''\n __tablename__ = \"loop\"\n idx = Column(Integer, primary_key=True)\n max_iteration = Column(Integer)\n iteration = Column(Integer)\n pe_jump_id = Column(Integer, ForeignKey('pipe_element.idx'))\n break_loop = Column(Boolean)\n pe_jump = relationship(\"PipeElement\", foreign_keys='Loop.pe_jump_id',\n uselist=False)\n pipe_element_id = Column(Integer, ForeignKey('pipe_element.idx'))\n\n def __init__(self, max_iteration=None, iteration=0, pe_jump_id=None,\n break_loop=False, pipe_element_id=None):\n self.max_iteration = max_iteration\n self.iteration = iteration\n self.pe_jump_id = pe_jump_id\n self.break_loop = break_loop\n self.pipe_element_id = pipe_element_id\n\n\nclass LabelLeaf(Base):\n '''A LabelLeaf\n\n Attributes:\n idx (int): ID in database.\n name (str): Name of the LabelName.\n abbreviation (str):\n description (str):\n timestamp (DateTime):\n external_id (str): Id of an external semantic label system (for e.g. 
synsetid of wordnet)\n is_deleted (Boolean): \n is_root (Boolean): Indicates if this leaf is the root of a tree.\n parent_leaf_id (Integer): Reference to parent LabelLeaf.\n label_leafs (list of :class:`LabelLeaf`):\n '''\n __tablename__ = \"label_leaf\"\n idx = Column(Integer, primary_key=True)\n name = Column(String(100))\n abbreviation = Column(String(20))\n timestamp = Column(DATETIME(fsp=6))\n description = Column(Text)\n external_id = Column(String(4096))\n is_deleted = Column(Boolean)\n is_root = Column(Boolean)\n parent_leaf_id = Column(Integer, ForeignKey('label_leaf.idx'))\n label_leaves = relationship('LabelLeaf')\n\n def __init__(self, idx=None, name=None, abbreviation=None, description=None,\n timestamp=None, external_id=None, label_tree_id=None, is_deleted=None,\n parent_leaf_id=None, is_root=None):\n self.idx = idx\n self.name = name\n self.abbreviation = abbreviation\n self.description = description\n self.timestamp = timestamp\n self.external_id = external_id\n self.is_deleted = is_deleted\n self.parent_leaf_id = parent_leaf_id\n self.is_root = is_root\n\n def to_dict(self):\n '''Transform this object to a dict.\n\n Returns:\n dict:\n '''\n return {\n 'idx': self.idx,\n 'name': self.name,\n 'abbreviation': self.abbreviation,\n 'description': self.description,\n 'timestamp': self.timestamp,\n 'external_id': self.external_id,\n 'is_deleted': self.is_deleted,\n 'parent_leaf_id': self.parent_leaf_id,\n 'is_root': self.is_root\n }\n\n def to_df(self):\n '''Transform this LabelLeaf to a pandas DataFrame.\n\n Returns:\n pd.DataFrame:\n '''\n return pd.DataFrame(self.to_dict(), index=[0])\n\n\nclass Label(Base):\n '''Represants an Label that is related to an annoation.\n\n Attributes:\n idx (int): ID in database.\n dtype (enum): :class:`lost.db.dtype.Result` type of this attribute.\n label_leaf_id: ID of related :class:`model.LabelLeaf`.\n img_anno_id (int):\n two_d_anno_id (int):\n timestamp (DateTime):\n timestamp_lock (DateTime):\n label_leaf 
(model.LabelLeaf): related :class:`model.LabelLeaf` object.\n annotator_id (Integer): GroupID of Annotator who has assigned this Label.\n confidence (float): Confidence of Annotation.\n anno_time (float): Time of annotaiton duration\n\n '''\n __tablename__ = \"label\"\n idx = Column(Integer, primary_key=True)\n dtype = Column(Integer)\n label_leaf_id = Column(Integer, ForeignKey(\n 'label_leaf.idx'), nullable=False)\n img_anno_id = Column(Integer, ForeignKey('image_anno.idx'))\n two_d_anno_id = Column(Integer, ForeignKey('two_d_anno.idx'))\n annotator_id = Column(Integer, ForeignKey('user.idx'))\n timestamp = Column(DATETIME(fsp=6))\n timestamp_lock = Column(DATETIME(fsp=6))\n label_leaf = relationship('LabelLeaf', uselist=False)\n confidence = Column(Float)\n anno_time = Column(Float)\n\n def __init__(self, idx=None, dtype=None, label_leaf_id=None, img_anno_id=None,\n two_d_anno_id=None, annotator_id=None,\n timestamp_lock=None, timestamp=None,\n confidence=None, anno_time=None):\n self.idx = idx\n self.dtype = dtype\n self.label_leaf_id = label_leaf_id\n self.img_anno_id = img_anno_id\n self.two_d_anno_id = two_d_anno_id\n self.annotator_id = annotator_id\n self.timestamp_lock = timestamp_lock\n self.timestamp = timestamp\n self.confidence = confidence\n self.anno_time = anno_time\n\n\nclass Track(Base):\n '''Represents a track. 
Multiple TwoDAnnos are assigned to one track.\n\n Attributes:\n idx (int): ID in database.\n track_n (int): Track number that identifies this track inside of\n an annotation session.\n anno_task_id (int): ID of the related annotation task\n name (str): A human readable name for this track.\n timestamp (DateTime): Timestamp when this track was created.\n user_id (int): Id of Annotator who has assigned this Label.\n iteration (int): Iteration in which this track was annotated\n confidence (float): A confidence value for the annotated track.\n anno_time (float): Time of annotaiton duration.\n\n '''\n __tablename__ = \"track\"\n idx = Column(Integer, primary_key=True)\n track_n = Column(Integer)\n anno_task_id = Column(Integer, ForeignKey('anno_task.idx')) \n name = Column(String(100))\n timestamp = Column(DATETIME(fsp=6))\n user_id = Column(Integer, ForeignKey('user.idx'))\n iteration = Column(Integer)\n confidence = Column(Float)\n anno_time = Column(Float)\n twod_annos = relationship('TwoDAnno')\n annotator = relationship('User', uselist=False)\n\n def __init__(self, idx=None, track_n=None, \n anno_task_id=None, name=None, timestamp=None,\n user_id=None, iteration=None,\n confidence=None, anno_time=None\n ):\n self.idx = idx\n self.track_n = track_n\n self.anno_task_id = anno_task_id\n self.name = name\n self.timestamp = timestamp\n self.user_id = user_id\n self.iteration = iteration\n self.confidence = confidence\n self.anno_time = anno_time\n\n\nclass RequiredLabelLeaf(Base):\n '''A RequiredLabelLeaf\n\n Attributes:\n idx (int): ID in database.\n anno_task_id (int):\n label_leaf_id (int):\n max_labels (int): Max count of labels that can be assinged for a specific\n :class:`AnnoTask`\n max_depth (int): Maximal depth in a tree beginning from that LabelLeaf\n '''\n __tablename__ = \"required_label_leaf\"\n idx = Column(Integer, primary_key=True)\n anno_task_id = Column(Integer, ForeignKey('anno_task.idx'))\n label_leaf_id = Column(Integer, 
ForeignKey('label_leaf.idx'))\n max_labels = Column(Integer)\n # type: lost.db.model.LabelLeaf\n label_leaf = relationship(\"LabelLeaf\", uselist=False)\n\n def __init__(self, anno_task_id=None, label_leaf_id=None, max_labels=None):\n self.anno_task_id = anno_task_id\n self.label_leaf_id = label_leaf_id\n self.max_labels = max_labels\n\n\nclass Worker(Base):\n '''Represents a container with related celery worker that executes scripts.\n\n Attributes:\n idx (int): ID in database.\n env_name (str): Name that indicates the environment that is\n installed in this worker. Each env is realted to a queue in\n celery.\n worker_name (str): Unique name for a container/ worker. \n timestamp (DateTime): Last life sign of worker.\n register_timestamp (DateTime): Timestamp of first registration \n of a worker in LOST.\n resources (str): Json containing the available resources of a \n worker.\n in_progress (str): Json dict containing scripts that are currently\n executed by this worker. {'pipe_element_id': 'script_path',...}\n '''\n __tablename__ = \"worker\"\n idx = Column(Integer, primary_key=True)\n env_name = Column(String(100))\n worker_name = Column(String(100))\n timestamp = Column(DATETIME(fsp=6))\n register_timestamp = Column(DATETIME(fsp=6))\n resources = Column(Text)\n in_progress = Column(Text)\n\n def __init__(self, idx=None, env_name=None,\n worker_name=None, timestamp=None,\n register_timestamp=None, resources=None, in_progress=None):\n self.idx = idx\n self.env_name = env_name\n self.worker_name = worker_name\n self.timestamp = timestamp\n self.register_timestamp = register_timestamp\n self.resources = resources\n self.in_progress = in_progress\n"
] |
[
[
"pandas.DataFrame"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [],
"tensorflow": []
}
] |
nguyenphan99/test
|
[
"75429497b0ca8b802803dd1518d0dc25a7fd4008"
] |
[
"models/resnet.py"
] |
[
"from typing import Type, Any, Callable, Union, List, Optional\n\nimport torch\nimport torch.nn as nn\nfrom torch import Tensor\n\nfrom ._internally_replaced_utils import load_state_dict_from_url\nfrom .utils import _log_api_usage_once\n\n\n__all__ = [\n \"ResNet\",\n \"resnet18\",\n \"resnet34\",\n \"resnet50\",\n \"resnet101\",\n \"resnet152\",\n \"resnext50_32x4d\",\n \"resnext101_32x8d\",\n \"wide_resnet50_2\",\n \"wide_resnet101_2\",\n]\n\n\nmodel_urls = {\n \"resnet18\": \"https://download.pytorch.org/models/resnet18-f37072fd.pth\",\n \"resnet34\": \"https://download.pytorch.org/models/resnet34-b627a593.pth\",\n \"resnet50\": \"https://download.pytorch.org/models/resnet50-0676ba61.pth\",\n \"resnet101\": \"https://download.pytorch.org/models/resnet101-63fe2227.pth\",\n \"resnet152\": \"https://download.pytorch.org/models/resnet152-394f9c45.pth\",\n \"resnext50_32x4d\": \"https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth\",\n \"resnext101_32x8d\": \"https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth\",\n \"wide_resnet50_2\": \"https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth\",\n \"wide_resnet101_2\": \"https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth\",\n}\n\n\ndef conv3x3(in_planes: int, out_planes: int, stride: int = 1, groups: int = 1, dilation: int = 1) -> nn.Conv2d:\n \"\"\"3x3 convolution with padding\"\"\"\n return nn.Conv2d(\n in_planes,\n out_planes,\n kernel_size=3,\n stride=stride,\n padding=dilation,\n groups=groups,\n bias=False,\n dilation=dilation,\n )\n\n\ndef conv1x1(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:\n \"\"\"1x1 convolution\"\"\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)\n\n\nclass BasicBlock(nn.Module):\n expansion: int = 1\n\n def __init__(\n self,\n inplanes: int,\n planes: int,\n stride: int = 1,\n downsample: Optional[nn.Module] = None,\n groups: int = 1,\n base_width: int = 64,\n dilation: int = 1,\n 
norm_layer: Optional[Callable[..., nn.Module]] = None,\n ) -> None:\n super().__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if groups != 1 or base_width != 64:\n raise ValueError(\"BasicBlock only supports groups=1 and base_width=64\")\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = norm_layer(planes)\n self.relu = nn.ReLU(inplace=True)\n self.conv2 = conv3x3(planes, planes)\n self.bn2 = norm_layer(planes)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x: Tensor) -> Tensor:\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass Bottleneck(nn.Module):\n # Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)\n # while original implementation places the stride at the first 1x1 convolution(self.conv1)\n # according to \"Deep residual learning for image recognition\"https://arxiv.org/abs/1512.03385.\n # This variant is also known as ResNet V1.5 and improves accuracy according to\n # https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.\n\n expansion: int = 4\n\n def __init__(\n self,\n inplanes: int,\n planes: int,\n stride: int = 1,\n downsample: Optional[nn.Module] = None,\n groups: int = 1,\n base_width: int = 64,\n dilation: int = 1,\n norm_layer: Optional[Callable[..., nn.Module]] = None,\n ) -> None:\n super().__init__()\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n width = int(planes * (base_width / 64.0)) * groups\n # Both self.conv2 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv1x1(inplanes, width)\n 
self.bn1 = norm_layer(width)\n self.conv2 = conv3x3(width, width, stride, groups, dilation)\n self.bn2 = norm_layer(width)\n self.conv3 = conv1x1(width, planes * self.expansion)\n self.bn3 = norm_layer(planes * self.expansion)\n self.relu = nn.ReLU(inplace=True)\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x: Tensor) -> Tensor:\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n out = self.relu(out)\n\n out = self.conv3(out)\n out = self.bn3(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\nclass ResNet(nn.Module):\n def __init__(\n self,\n block: Type[Union[BasicBlock, Bottleneck]],\n layers: List[int],\n num_classes: int = 1000,\n in_channels: int = 15,\n zero_init_residual: bool = False,\n groups: int = 1,\n width_per_group: int = 64,\n replace_stride_with_dilation: Optional[List[bool]] = None,\n norm_layer: Optional[Callable[..., nn.Module]] = None,\n ) -> None:\n super().__init__()\n _log_api_usage_once(self)\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n self._norm_layer = norm_layer\n\n self.inplanes = 64\n self.dilation = 1\n if replace_stride_with_dilation is None:\n # each element in the tuple indicates if we should replace\n # the 2x2 stride with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\n \"replace_stride_with_dilation should be None \"\n f\"or a 3-element tuple, got {replace_stride_with_dilation}\"\n )\n self.groups = groups\n self.base_width = width_per_group\n self.conv1 = nn.Conv2d(in_channels, self.inplanes, kernel_size=7, stride=2, padding=3, bias=False)\n self.bn1 = norm_layer(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, 
layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2, dilate=replace_stride_with_dilation[0])\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2, dilate=replace_stride_with_dilation[1])\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2, dilate=replace_stride_with_dilation[2])\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode=\"fan_out\", nonlinearity=\"relu\")\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0) # type: ignore[arg-type]\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0) # type: ignore[arg-type]\n\n def _make_layer(\n self,\n block: Type[Union[BasicBlock, Bottleneck]],\n planes: int,\n blocks: int,\n stride: int = 1,\n dilate: bool = False,\n ) -> nn.Sequential:\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion),\n )\n\n layers = []\n layers.append(\n block(\n self.inplanes, planes, stride, downsample, self.groups, self.base_width, previous_dilation, norm_layer\n )\n )\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(\n block(\n self.inplanes,\n planes,\n groups=self.groups,\n 
base_width=self.base_width,\n dilation=self.dilation,\n norm_layer=norm_layer,\n )\n )\n\n return nn.Sequential(*layers)\n\n def _forward_impl(self, x: Tensor) -> Tensor:\n # See note [TorchScript super()]\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.fc(x)\n\n return x\n\n def forward(self, x: Tensor) -> Tensor:\n return self._forward_impl(x)\n\n\ndef _resnet(\n arch: str,\n block: Type[Union[BasicBlock, Bottleneck]],\n layers: List[int],\n pretrained: bool,\n progress: bool,\n **kwargs: Any,\n) -> ResNet:\n model = ResNet(block, layers, **kwargs)\n if pretrained:\n state_dict = load_state_dict_from_url(model_urls[arch], progress=progress)\n model.load_state_dict(state_dict)\n return model\n\n\ndef resnet18(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n r\"\"\"ResNet-18 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet(\"resnet18\", BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)\n\n\ndef resnet34(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n r\"\"\"ResNet-34 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet(\"resnet34\", BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs)\n\n\ndef resnet50(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n r\"\"\"ResNet-50 model from\n `\"Deep Residual Learning for Image 
Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet(\"resnet50\", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)\n\n\ndef resnet101(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n r\"\"\"ResNet-101 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet(\"resnet101\", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)\n\n\ndef resnet152(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n r\"\"\"ResNet-152 model from\n `\"Deep Residual Learning for Image Recognition\" <https://arxiv.org/pdf/1512.03385.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n return _resnet(\"resnet152\", Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs)\n\n\ndef resnext50_32x4d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n r\"\"\"ResNeXt-50 32x4d model from\n `\"Aggregated Residual Transformation for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs[\"groups\"] = 32\n kwargs[\"width_per_group\"] = 4\n return _resnet(\"resnext50_32x4d\", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)\n\n\ndef resnext101_32x8d(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n r\"\"\"ResNeXt-101 32x8d model from\n `\"Aggregated Residual 
Transformation for Deep Neural Networks\" <https://arxiv.org/pdf/1611.05431.pdf>`_.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs[\"groups\"] = 32\n kwargs[\"width_per_group\"] = 8\n return _resnet(\"resnext101_32x8d\", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)\n\n\ndef wide_resnet50_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n r\"\"\"Wide ResNet-50-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_.\n\n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. The number of channels in outer 1x1\n convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs[\"width_per_group\"] = 64 * 2\n return _resnet(\"wide_resnet50_2\", Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)\n\n\ndef wide_resnet101_2(pretrained: bool = False, progress: bool = True, **kwargs: Any) -> ResNet:\n r\"\"\"Wide ResNet-101-2 model from\n `\"Wide Residual Networks\" <https://arxiv.org/pdf/1605.07146.pdf>`_.\n\n The model is the same as ResNet except for the bottleneck number of channels\n which is twice larger in every block. The number of channels in outer 1x1\n convolutions is the same, e.g. 
last block in ResNet-50 has 2048-512-2048\n channels, and in Wide ResNet-50-2 has 2048-1024-2048.\n\n Args:\n pretrained (bool): If True, returns a model pre-trained on ImageNet\n progress (bool): If True, displays a progress bar of the download to stderr\n \"\"\"\n kwargs[\"width_per_group\"] = 64 * 2\n return _resnet(\"wide_resnet101_2\", Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)"
] |
[
[
"torch.nn.Sequential",
"torch.nn.init.constant_",
"torch.nn.Conv2d",
"torch.nn.Linear",
"torch.nn.MaxPool2d",
"torch.nn.AdaptiveAvgPool2d",
"torch.flatten",
"torch.nn.ReLU",
"torch.nn.init.kaiming_normal_"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Morgan-Gan/2D-3D-SLowFast-TSM--Proj
|
[
"9568be69fc87d59d569327ae5c3d2c448bc23648"
] |
[
"backbone/hidden_for_roi_maxpool.py"
] |
[
"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport numpy as np\n\n\nclass Hidden(nn.Module):\n\n def __init__(self, inplanes, planes, stride=1):\n super(Hidden, self).__init__()\n\n def forward(self, x):\n out=x.view(x.shape[0],-1)\n out = out.view(-1, out.size(1))\n return out\n\ndef weight_init(m):\n # 也可以判断是否为conv2d,使用相应的初始化方式\n if isinstance(m, nn.Conv3d):\n print(\"using kaiming\")\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n\ndef hidden50(**kwargs):\n \"\"\"Constructs a ResNet-50 model.\n \"\"\"\n model = Hidden(2304,2304,2)\n return model"
] |
[
[
"torch.nn.init.kaiming_normal_"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
doggeral/incubator-mxnet
|
[
"bcff49888fdaae6b9922de4d4712d505cf33c596"
] |
[
"python/mxnet/image/image.py"
] |
[
"# Licensed to the Apache Software Foundation (ASF) under one\n# or more contributor license agreements. See the NOTICE file\n# distributed with this work for additional information\n# regarding copyright ownership. The ASF licenses this file\n# to you under the Apache License, Version 2.0 (the\n# \"License\"); you may not use this file except in compliance\n# with the License. You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing,\n# software distributed under the License is distributed on an\n# \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY\n# KIND, either express or implied. See the License for the\n# specific language governing permissions and limitations\n# under the License.\n\n# pylint: disable=no-member, too-many-lines, redefined-builtin, protected-access, unused-import, invalid-name\n# pylint: disable=too-many-arguments, too-many-locals, no-name-in-module, too-many-branches, too-many-statements\n\"\"\"Read individual image files and perform augmentations.\"\"\"\n\nfrom __future__ import absolute_import, print_function\n\nimport sys\nimport os\nimport random\nimport logging\nimport json\nimport warnings\nimport numpy as np\n\n\ntry:\n import cv2\nexcept ImportError:\n cv2 = None\n\nfrom ..base import numeric_types\nfrom .. import ndarray as nd\nfrom ..ndarray import _internal\nfrom .. import io\nfrom .. import recordio\n\n\ndef imread(filename, *args, **kwargs):\n \"\"\"Read and decode an image to an NDArray.\n\n .. note:: `imread` uses OpenCV (not the CV2 Python library).\n MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.\n\n Parameters\n ----------\n filename : str\n Name of the image file to be loaded.\n flag : {0, 1}, default 1\n 1 for three channel color output. 
0 for grayscale output.\n to_rgb : bool, default True\n True for RGB formatted output (MXNet default).\n False for BGR formatted output (OpenCV default).\n out : NDArray, optional\n Output buffer. Use `None` for automatic allocation.\n\n Returns\n -------\n NDArray\n An `NDArray` containing the image.\n\n Example\n -------\n >>> mx.img.imread(\"flower.jpg\")\n <NDArray 224x224x3 @cpu(0)>\n\n Set `flag` parameter to 0 to get grayscale output\n\n >>> mx.img.imread(\"flower.jpg\", flag=0)\n <NDArray 224x224x1 @cpu(0)>\n\n Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)\n\n >>> mx.img.imread(\"flower.jpg\", to_rgb=0)\n <NDArray 224x224x3 @cpu(0)>\n \"\"\"\n return _internal._cvimread(filename, *args, **kwargs)\n\n\ndef imresize(src, w, h, *args, **kwargs):\n r\"\"\"Resize image with OpenCV.\n\n .. note:: `imresize` uses OpenCV (not the CV2 Python library). MXNet must have been built\n with USE_OPENCV=1 for `imresize` to work.\n\n Parameters\n ----------\n src : NDArray\n source image\n w : int, required\n Width of resized image.\n h : int, required\n Height of resized image.\n interp : int, optional, default=1\n Interpolation method (default=cv2.INTER_LINEAR).\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. 
(used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n More details can be found in the documentation of OpenCV, please refer to\n http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.\n\n out : NDArray, optional\n The output NDArray to hold the result.\n\n Returns\n -------\n out : NDArray or list of NDArrays\n The output of this function.\n\n Example\n -------\n >>> with open(\"flower.jpeg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image)\n >>> image\n <NDArray 2321x3482x3 @cpu(0)>\n >>> new_image = mx.img.resize(image, 240, 360)\n >>> new_image\n <NDArray 240x360x3 @cpu(0)>\n \"\"\"\n return _internal._cvimresize(src, w, h, *args, **kwargs)\n\n\ndef imdecode(buf, *args, **kwargs):\n \"\"\"Decode an image to an NDArray.\n\n .. note:: `imdecode` uses OpenCV (not the CV2 Python library).\n MXNet must have been built with USE_OPENCV=1 for `imdecode` to work.\n\n Parameters\n ----------\n buf : str/bytes/bytearray or numpy.ndarray\n Binary image data as string or numpy ndarray.\n flag : int, optional, default=1\n 1 for three channel color output. 0 for grayscale output.\n to_rgb : int, optional, default=1\n 1 for RGB formatted output (MXNet default). 0 for BGR formatted output (OpenCV default).\n out : NDArray, optional\n Output buffer. Use `None` for automatic allocation.\n\n Returns\n -------\n NDArray\n An `NDArray` containing the image.\n\n Example\n -------\n >>> with open(\"flower.jpg\", 'rb') as fp:\n ... 
str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image)\n >>> image\n <NDArray 224x224x3 @cpu(0)>\n\n Set `flag` parameter to 0 to get grayscale output\n\n >>> with open(\"flower.jpg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image, flag=0)\n >>> image\n <NDArray 224x224x1 @cpu(0)>\n\n Set `to_rgb` parameter to 0 to get output in OpenCV format (BGR)\n\n >>> with open(\"flower.jpg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image, to_rgb=0)\n >>> image\n <NDArray 224x224x3 @cpu(0)>\n \"\"\"\n if not isinstance(buf, nd.NDArray):\n if sys.version_info[0] == 3 and not isinstance(buf, (bytes, bytearray, np.ndarray)):\n raise ValueError('buf must be of type bytes, bytearray or numpy.ndarray,'\n 'if you would like to input type str, please convert to bytes')\n buf = nd.array(np.frombuffer(buf, dtype=np.uint8), dtype=np.uint8)\n\n return _internal._cvimdecode(buf, *args, **kwargs)\n\n\ndef scale_down(src_size, size):\n \"\"\"Scales down crop size if it's larger than image size.\n\n If width/height of the crop is larger than the width/height of the image,\n sets the width/height to the width/height of the image.\n\n Parameters\n ----------\n src_size : tuple of int\n Size of the image in (width, height) format.\n size : tuple of int\n Size of the crop in (width, height) format.\n\n Returns\n -------\n tuple of int\n A tuple containing the scaled crop size in (width, height) format.\n\n Example\n --------\n >>> src_size = (640,480)\n >>> size = (720,120)\n >>> new_size = mx.img.scale_down(src_size, size)\n >>> new_size\n (640,106)\n \"\"\"\n w, h = size\n sw, sh = src_size\n if sh < h:\n w, h = float(w * sh) / h, sh\n if sw < w:\n w, h = sw, float(h * sw) / w\n return int(w), int(h)\n\n\ndef copyMakeBorder(src, top, bot, left, right, *args, **kwargs):\n \"\"\"Pad image border with OpenCV.\n\n Parameters\n ----------\n src : NDArray\n source image\n top : int, required\n Top 
margin.\n bot : int, required\n Bottom margin.\n left : int, required\n Left margin.\n right : int, required\n Right margin.\n type : int, optional, default='0'\n Filling type (default=cv2.BORDER_CONSTANT).\n 0 - cv2.BORDER_CONSTANT - Adds a constant colored border.\n 1 - cv2.BORDER_REFLECT - Border will be mirror reflection of the\n border elements, like this : fedcba|abcdefgh|hgfedcb\n 2 - cv2.BORDER_REFLECT_101 or cv.BORDER_DEFAULT - Same as above,\n but with a slight change, like this : gfedcb|abcdefgh|gfedcba\n 3 - cv2.BORDER_REPLICATE - Last element is replicated throughout,\n like this: aaaaaa|abcdefgh|hhhhhhh\n 4 - cv2.BORDER_WRAP - it will look like this : cdefgh|abcdefgh|abcdefg\n value : double, optional, default=0\n (Deprecated! Use ``values`` instead.) Fill with single value.\n values : tuple of <double>, optional, default=[]\n Fill with value(RGB[A] or gray), up to 4 channels.\n\n out : NDArray, optional\n The output NDArray to hold the result.\n\n Returns\n -------\n out : NDArray or list of NDArrays\n The output of this function.\n\n Example\n --------\n >>> with open(\"flower.jpeg\", 'rb') as fp:\n ... str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image)\n >>> image\n <NDArray 2321x3482x3 @cpu(0)>\n >>> new_image = mx_border = mx.image.copyMakeBorder(mx_img, 1, 2, 3, 4, type=0)\n >>> new_image\n <NDArray 2324x3489x3 @cpu(0)>\n \"\"\"\n return _internal._cvcopyMakeBorder(src, top, bot, left, right, *args, **kwargs)\n\n\ndef _get_interp_method(interp, sizes=()):\n \"\"\"Get the interpolation method for resize functions.\n The major purpose of this function is to wrap a random interp method selection\n and a auto-estimation method.\n\n Parameters\n ----------\n interp : int\n interpolation method for all resizing operations\n\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). 
It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n More details can be found in the documentation of OpenCV, please refer to\n http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.\n sizes : tuple of int\n (old_height, old_width, new_height, new_width), if None provided, auto(9)\n will return Area(2) anyway.\n\n Returns\n -------\n int\n interp method from 0 to 4\n \"\"\"\n if interp == 9:\n if sizes:\n assert len(sizes) == 4\n oh, ow, nh, nw = sizes\n if nh > oh and nw > ow:\n return 2\n elif nh < oh and nw < ow:\n return 3\n else:\n return 1\n else:\n return 2\n if interp == 10:\n return random.randint(0, 4)\n if interp not in (0, 1, 2, 3, 4):\n raise ValueError('Unknown interp method %d' % interp)\n return interp\n\n\ndef resize_short(src, size, interp=2):\n \"\"\"Resizes shorter edge to size.\n\n .. 
note:: `resize_short` uses OpenCV (not the CV2 Python library).\n MXNet must have been built with OpenCV for `resize_short` to work.\n\n Resizes the original image by setting the shorter edge to size\n and setting the longer edge accordingly.\n Resizing function is called from OpenCV.\n\n Parameters\n ----------\n src : NDArray\n The original image.\n size : int\n The length to be set for the shorter edge.\n interp : int, optional, default=2\n Interpolation method used for resizing the image.\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n More details can be found in the documentation of OpenCV, please refer to\n http://docs.opencv.org/master/da/d54/group__imgproc__transform.html.\n\n Returns\n -------\n NDArray\n An 'NDArray' containing the resized image.\n\n Example\n -------\n >>> with open(\"flower.jpeg\", 'rb') as fp:\n ... 
str_image = fp.read()\n ...\n >>> image = mx.img.imdecode(str_image)\n >>> image\n <NDArray 2321x3482x3 @cpu(0)>\n >>> size = 640\n >>> new_image = mx.img.resize_short(image, size)\n >>> new_image\n <NDArray 2321x3482x3 @cpu(0)>\n \"\"\"\n h, w, _ = src.shape\n if h > w:\n new_h, new_w = size * h // w, size\n else:\n new_h, new_w = size, size * w // h\n return imresize(src, new_w, new_h, interp=_get_interp_method(interp, (h, w, new_h, new_w)))\n\n\ndef fixed_crop(src, x0, y0, w, h, size=None, interp=2):\n \"\"\"Crop src at fixed location, and (optionally) resize it to size.\n\n Parameters\n ----------\n src : NDArray\n Input image\n x0 : int\n Left boundary of the cropping area\n y0 : int\n Top boundary of the cropping area\n w : int\n Width of the cropping area\n h : int\n Height of the cropping area\n size : tuple of (w, h)\n Optional, resize to new size after cropping\n interp : int, optional, default=2\n Interpolation method. See resize_short for details.\n\n Returns\n -------\n NDArray\n An `NDArray` containing the cropped image.\n \"\"\"\n out = nd.slice(src, begin=(y0, x0, 0), end=(y0 + h, x0 + w, int(src.shape[2])))\n if size is not None and (w, h) != size:\n sizes = (h, w, size[1], size[0])\n out = imresize(out, *size, interp=_get_interp_method(interp, sizes))\n return out\n\n\ndef random_crop(src, size, interp=2):\n \"\"\"Randomly crop `src` with `size` (width, height).\n Upsample result if `src` is smaller than `size`.\n\n Parameters\n ----------\n src: Source image `NDArray`\n size: Size of the crop formatted as (width, height). If the `size` is larger\n than the image, then the source image is upsampled to `size` and returned.\n interp: int, optional, default=2\n Interpolation method. 
See resize_short for details.\n Returns\n -------\n NDArray\n An `NDArray` containing the cropped image.\n Tuple\n A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the\n original image and (width, height) are the dimensions of the cropped image.\n\n Example\n -------\n >>> im = mx.nd.array(cv2.imread(\"flower.jpg\"))\n >>> cropped_im, rect = mx.image.random_crop(im, (100, 100))\n >>> print cropped_im\n <NDArray 100x100x1 @cpu(0)>\n >>> print rect\n (20, 21, 100, 100)\n \"\"\"\n\n h, w, _ = src.shape\n new_w, new_h = scale_down((w, h), size)\n\n x0 = random.randint(0, w - new_w)\n y0 = random.randint(0, h - new_h)\n\n out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)\n return out, (x0, y0, new_w, new_h)\n\n\ndef center_crop(src, size, interp=2):\n \"\"\"Crops the image `src` to the given `size` by trimming on all four\n sides and preserving the center of the image. Upsamples if `src` is smaller\n than `size`.\n\n .. note:: This requires MXNet to be compiled with USE_OPENCV.\n\n Parameters\n ----------\n src : NDArray\n Binary source image data.\n size : list or tuple of int\n The desired output image size.\n interp : int, optional, default=2\n Interpolation method. See resize_short for details.\n\n Returns\n -------\n NDArray\n The cropped image.\n Tuple\n (x, y, width, height) where x, y are the positions of the crop in the\n original image and width, height the dimensions of the crop.\n\n Example\n -------\n >>> with open(\"flower.jpg\", 'rb') as fp:\n ... 
str_image = fp.read()\n ...\n >>> image = mx.image.imdecode(str_image)\n >>> image\n <NDArray 2321x3482x3 @cpu(0)>\n >>> cropped_image, (x, y, width, height) = mx.image.center_crop(image, (1000, 500))\n >>> cropped_image\n <NDArray 500x1000x3 @cpu(0)>\n >>> x, y, width, height\n (1241, 910, 1000, 500)\n \"\"\"\n\n h, w, _ = src.shape\n new_w, new_h = scale_down((w, h), size)\n\n x0 = int((w - new_w) / 2)\n y0 = int((h - new_h) / 2)\n\n out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)\n return out, (x0, y0, new_w, new_h)\n\n\ndef color_normalize(src, mean, std=None):\n \"\"\"Normalize src with mean and std.\n\n Parameters\n ----------\n src : NDArray\n Input image\n mean : NDArray\n RGB mean to be subtracted\n std : NDArray\n RGB standard deviation to be divided\n\n Returns\n -------\n NDArray\n An `NDArray` containing the normalized image.\n \"\"\"\n if mean is not None:\n src -= mean\n if std is not None:\n src /= std\n return src\n\n\ndef random_size_crop(src, size, area, ratio, interp=2, **kwargs):\n \"\"\"Randomly crop src with size. Randomize area and aspect ratio.\n\n Parameters\n ----------\n src : NDArray\n Input image\n size : tuple of (int, int)\n Size of the crop formatted as (width, height).\n area : float in (0, 1] or tuple of (float, float)\n If tuple, minimum area and maximum area to be maintained after cropping\n If float, minimum area to be maintained after cropping, maximum area is set to 1.0\n ratio : tuple of (float, float)\n Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)\n interp: int, optional, default=2\n Interpolation method. See resize_short for details.\n Returns\n -------\n NDArray\n An `NDArray` containing the cropped image.\n Tuple\n A tuple (x, y, width, height) where (x, y) is top-left position of the crop in the\n original image and (width, height) are the dimensions of the cropped image.\n\n \"\"\"\n h, w, _ = src.shape\n src_area = h * w\n\n if 'min_area' in kwargs:\n warnings.warn('`min_area` is deprecated. 
Please use `area` instead.',\n DeprecationWarning)\n area = kwargs.pop('min_area')\n assert not kwargs, \"unexpected keyword arguments for `random_size_crop`.\"\n\n if isinstance(area, numeric_types):\n area = (area, 1.0)\n for _ in range(10):\n target_area = random.uniform(area[0], area[1]) * src_area\n log_ratio = (np.log(ratio[0]), np.log(ratio[1]))\n new_ratio = np.exp(random.uniform(*log_ratio))\n\n new_w = int(round(np.sqrt(target_area * new_ratio)))\n new_h = int(round(np.sqrt(target_area / new_ratio)))\n\n if new_w <= w and new_h <= h:\n x0 = random.randint(0, w - new_w)\n y0 = random.randint(0, h - new_h)\n\n out = fixed_crop(src, x0, y0, new_w, new_h, size, interp)\n return out, (x0, y0, new_w, new_h)\n\n # fall back to center_crop\n return center_crop(src, size, interp)\n\n\nclass Augmenter(object):\n \"\"\"Image Augmenter base class\"\"\"\n def __init__(self, **kwargs):\n self._kwargs = kwargs\n for k, v in self._kwargs.items():\n if isinstance(v, nd.NDArray):\n v = v.asnumpy()\n if isinstance(v, np.ndarray):\n v = v.tolist()\n self._kwargs[k] = v\n\n def dumps(self):\n \"\"\"Saves the Augmenter to string\n\n Returns\n -------\n str\n JSON formatted string that describes the Augmenter.\n \"\"\"\n return json.dumps([self.__class__.__name__.lower(), self._kwargs])\n\n def __call__(self, src):\n \"\"\"Abstract implementation body\"\"\"\n raise NotImplementedError(\"Must override implementation.\")\n\n\nclass SequentialAug(Augmenter):\n \"\"\"Composing a sequential augmenter list.\n\n Parameters\n ----------\n ts : list of augmenters\n A series of augmenters to be applied in sequential order.\n \"\"\"\n def __init__(self, ts):\n super(SequentialAug, self).__init__()\n self.ts = ts\n\n def dumps(self):\n \"\"\"Override the default to avoid duplicate dump.\"\"\"\n return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n for aug in self.ts:\n src = aug(src)\n return src\n\n\nclass 
ResizeAug(Augmenter):\n \"\"\"Make resize shorter edge to size augmenter.\n\n Parameters\n ----------\n size : int\n The length to be set for the shorter edge.\n interp : int, optional, default=2\n Interpolation method. See resize_short for details.\n \"\"\"\n def __init__(self, size, interp=2):\n super(ResizeAug, self).__init__(size=size, interp=interp)\n self.size = size\n self.interp = interp\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n return resize_short(src, self.size, self.interp)\n\n\nclass ForceResizeAug(Augmenter):\n \"\"\"Force resize to size regardless of aspect ratio\n\n Parameters\n ----------\n size : tuple of (int, int)\n The desired size as in (width, height)\n interp : int, optional, default=2\n Interpolation method. See resize_short for details.\n \"\"\"\n def __init__(self, size, interp=2):\n super(ForceResizeAug, self).__init__(size=size, interp=interp)\n self.size = size\n self.interp = interp\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n sizes = (src.shape[0], src.shape[1], self.size[1], self.size[0])\n return imresize(src, *self.size, interp=_get_interp_method(self.interp, sizes))\n\n\nclass RandomCropAug(Augmenter):\n \"\"\"Make random crop augmenter\n\n Parameters\n ----------\n size : int\n The length to be set for the shorter edge.\n interp : int, optional, default=2\n Interpolation method. 
See resize_short for details.\n \"\"\"\n def __init__(self, size, interp=2):\n super(RandomCropAug, self).__init__(size=size, interp=interp)\n self.size = size\n self.interp = interp\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n return random_crop(src, self.size, self.interp)[0]\n\n\nclass RandomSizedCropAug(Augmenter):\n \"\"\"Make random crop with random resizing and random aspect ratio jitter augmenter.\n\n Parameters\n ----------\n size : tuple of (int, int)\n Size of the crop formatted as (width, height).\n area : float in (0, 1] or tuple of (float, float)\n If tuple, minimum area and maximum area to be maintained after cropping\n If float, minimum area to be maintained after cropping, maximum area is set to 1.0\n ratio : tuple of (float, float)\n Aspect ratio range as (min_aspect_ratio, max_aspect_ratio)\n interp: int, optional, default=2\n Interpolation method. See resize_short for details.\n \"\"\"\n def __init__(self, size, area, ratio, interp=2, **kwargs):\n super(RandomSizedCropAug, self).__init__(size=size, area=area,\n ratio=ratio, interp=interp)\n self.size = size\n if 'min_area' in kwargs:\n warnings.warn('`min_area` is deprecated. Please use `area` instead.',\n DeprecationWarning)\n self.area = kwargs.pop('min_area')\n else:\n self.area = area\n self.ratio = ratio\n self.interp = interp\n assert not kwargs, \"unexpected keyword arguments for `RandomSizedCropAug`.\"\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n return random_size_crop(src, self.size, self.area, self.ratio, self.interp)[0]\n\n\nclass CenterCropAug(Augmenter):\n \"\"\"Make center crop augmenter.\n\n Parameters\n ----------\n size : list or tuple of int\n The desired output image size.\n interp : int, optional, default=2\n Interpolation method. 
See resize_short for details.\n \"\"\"\n def __init__(self, size, interp=2):\n super(CenterCropAug, self).__init__(size=size, interp=interp)\n self.size = size\n self.interp = interp\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n return center_crop(src, self.size, self.interp)[0]\n\n\nclass RandomOrderAug(Augmenter):\n \"\"\"Apply list of augmenters in random order\n\n Parameters\n ----------\n ts : list of augmenters\n A series of augmenters to be applied in random order\n \"\"\"\n def __init__(self, ts):\n super(RandomOrderAug, self).__init__()\n self.ts = ts\n\n def dumps(self):\n \"\"\"Override the default to avoid duplicate dump.\"\"\"\n return [self.__class__.__name__.lower(), [x.dumps() for x in self.ts]]\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n random.shuffle(self.ts)\n for t in self.ts:\n src = t(src)\n return src\n\n\nclass BrightnessJitterAug(Augmenter):\n \"\"\"Random brightness jitter augmentation.\n\n Parameters\n ----------\n brightness : float\n The brightness jitter ratio range, [0, 1]\n \"\"\"\n def __init__(self, brightness):\n super(BrightnessJitterAug, self).__init__(brightness=brightness)\n self.brightness = brightness\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n alpha = 1.0 + random.uniform(-self.brightness, self.brightness)\n src *= alpha\n return src\n\n\nclass ContrastJitterAug(Augmenter):\n \"\"\"Random contrast jitter augmentation.\n\n Parameters\n ----------\n contrast : float\n The contrast jitter ratio range, [0, 1]\n \"\"\"\n def __init__(self, contrast):\n super(ContrastJitterAug, self).__init__(contrast=contrast)\n self.contrast = contrast\n self.coef = nd.array([[[0.299, 0.587, 0.114]]])\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n alpha = 1.0 + random.uniform(-self.contrast, self.contrast)\n gray = src * self.coef\n gray = (3.0 * (1.0 - alpha) / gray.size) * nd.sum(gray)\n src *= alpha\n src += gray\n return src\n\n\nclass SaturationJitterAug(Augmenter):\n \"\"\"Random 
saturation jitter augmentation.\n\n Parameters\n ----------\n saturation : float\n The saturation jitter ratio range, [0, 1]\n \"\"\"\n def __init__(self, saturation):\n super(SaturationJitterAug, self).__init__(saturation=saturation)\n self.saturation = saturation\n self.coef = nd.array([[[0.299, 0.587, 0.114]]])\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n alpha = 1.0 + random.uniform(-self.saturation, self.saturation)\n gray = src * self.coef\n gray = nd.sum(gray, axis=2, keepdims=True)\n gray *= (1.0 - alpha)\n src *= alpha\n src += gray\n return src\n\n\nclass HueJitterAug(Augmenter):\n \"\"\"Random hue jitter augmentation.\n\n Parameters\n ----------\n hue : float\n The hue jitter ratio range, [0, 1]\n \"\"\"\n def __init__(self, hue):\n super(HueJitterAug, self).__init__(hue=hue)\n self.hue = hue\n self.tyiq = np.array([[0.299, 0.587, 0.114],\n [0.596, -0.274, -0.321],\n [0.211, -0.523, 0.311]])\n self.ityiq = np.array([[1.0, 0.956, 0.621],\n [1.0, -0.272, -0.647],\n [1.0, -1.107, 1.705]])\n\n def __call__(self, src):\n \"\"\"Augmenter body.\n Using approximate linear transfomation described in:\n https://beesbuzz.biz/code/hsv_color_transforms.php\n \"\"\"\n alpha = random.uniform(-self.hue, self.hue)\n u = np.cos(alpha * np.pi)\n w = np.sin(alpha * np.pi)\n bt = np.array([[1.0, 0.0, 0.0],\n [0.0, u, -w],\n [0.0, w, u]])\n t = np.dot(np.dot(self.ityiq, bt), self.tyiq).T\n src = nd.dot(src, nd.array(t))\n return src\n\n\nclass ColorJitterAug(RandomOrderAug):\n \"\"\"Apply random brightness, contrast and saturation jitter in random order.\n\n Parameters\n ----------\n brightness : float\n The brightness jitter ratio range, [0, 1]\n contrast : float\n The contrast jitter ratio range, [0, 1]\n saturation : float\n The saturation jitter ratio range, [0, 1]\n \"\"\"\n def __init__(self, brightness, contrast, saturation):\n ts = []\n if brightness > 0:\n ts.append(BrightnessJitterAug(brightness))\n if contrast > 0:\n 
ts.append(ContrastJitterAug(contrast))\n if saturation > 0:\n ts.append(SaturationJitterAug(saturation))\n super(ColorJitterAug, self).__init__(ts)\n\n\nclass LightingAug(Augmenter):\n \"\"\"Add PCA based noise.\n\n Parameters\n ----------\n alphastd : float\n Noise level\n eigval : 3x1 np.array\n Eigen values\n eigvec : 3x3 np.array\n Eigen vectors\n \"\"\"\n def __init__(self, alphastd, eigval, eigvec):\n super(LightingAug, self).__init__(alphastd=alphastd, eigval=eigval, eigvec=eigvec)\n self.alphastd = alphastd\n self.eigval = eigval\n self.eigvec = eigvec\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n alpha = np.random.normal(0, self.alphastd, size=(3,))\n rgb = np.dot(self.eigvec * alpha, self.eigval)\n src += nd.array(rgb)\n return src\n\n\nclass ColorNormalizeAug(Augmenter):\n \"\"\"Mean and std normalization.\n\n Parameters\n ----------\n mean : NDArray\n RGB mean to be subtracted\n std : NDArray\n RGB standard deviation to be divided\n \"\"\"\n def __init__(self, mean, std):\n super(ColorNormalizeAug, self).__init__(mean=mean, std=std)\n self.mean = mean if mean is None or isinstance(mean, nd.NDArray) else nd.array(mean)\n self.std = std if std is None or isinstance(std, nd.NDArray) else nd.array(std)\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n return color_normalize(src, self.mean, self.std)\n\n\nclass RandomGrayAug(Augmenter):\n \"\"\"Randomly convert to gray image.\n\n Parameters\n ----------\n p : float\n Probability to convert to grayscale\n \"\"\"\n def __init__(self, p):\n super(RandomGrayAug, self).__init__(p=p)\n self.p = p\n self.mat = nd.array([[0.21, 0.21, 0.21],\n [0.72, 0.72, 0.72],\n [0.07, 0.07, 0.07]])\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n if random.random() < self.p:\n src = nd.dot(src, self.mat)\n return src\n\n\nclass HorizontalFlipAug(Augmenter):\n \"\"\"Random horizontal flip.\n\n Parameters\n ----------\n p : float\n Probability to flip image horizontally\n \"\"\"\n def 
__init__(self, p):\n super(HorizontalFlipAug, self).__init__(p=p)\n self.p = p\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n if random.random() < self.p:\n src = nd.flip(src, axis=1)\n return src\n\n\nclass CastAug(Augmenter):\n \"\"\"Cast to float32\"\"\"\n def __init__(self, typ='float32'):\n super(CastAug, self).__init__(type=typ)\n self.typ = typ\n\n def __call__(self, src):\n \"\"\"Augmenter body\"\"\"\n src = src.astype(self.typ)\n return src\n\n\ndef CreateAugmenter(data_shape, resize=0, rand_crop=False, rand_resize=False, rand_mirror=False,\n mean=None, std=None, brightness=0, contrast=0, saturation=0, hue=0,\n pca_noise=0, rand_gray=0, inter_method=2):\n \"\"\"Creates an augmenter list.\n\n Parameters\n ----------\n data_shape : tuple of int\n Shape for output data\n resize : int\n Resize shorter edge if larger than 0 at the begining\n rand_crop : bool\n Whether to enable random cropping other than center crop\n rand_resize : bool\n Whether to enable random sized cropping, require rand_crop to be enabled\n rand_gray : float\n [0, 1], probability to convert to grayscale for all channels, the number\n of channels will not be reduced to 1\n rand_mirror : bool\n Whether to apply horizontal flip to image with probability 0.5\n mean : np.ndarray or None\n Mean pixel values for [r, g, b]\n std : np.ndarray or None\n Standard deviations for [r, g, b]\n brightness : float\n Brightness jittering range (percent)\n contrast : float\n Contrast jittering range (percent)\n saturation : float\n Saturation jittering range (percent)\n hue : float\n Hue jittering range (percent)\n pca_noise : float\n Pca noise level (percent)\n inter_method : int, default=2(Area-based)\n Interpolation method for all resizing operations\n\n Possible values:\n 0: Nearest Neighbors Interpolation.\n 1: Bilinear interpolation.\n 2: Area-based (resampling using pixel area relation). It may be a\n preferred method for image decimation, as it gives moire-free\n results. 
But when the image is zoomed, it is similar to the Nearest\n Neighbors method. (used by default).\n 3: Bicubic interpolation over 4x4 pixel neighborhood.\n 4: Lanczos interpolation over 8x8 pixel neighborhood.\n 9: Cubic for enlarge, area for shrink, bilinear for others\n 10: Random select from interpolation method metioned above.\n Note:\n When shrinking an image, it will generally look best with AREA-based\n interpolation, whereas, when enlarging an image, it will generally look best\n with Bicubic (slow) or Bilinear (faster but still looks OK).\n\n Examples\n --------\n >>> # An example of creating multiple augmenters\n >>> augs = mx.image.CreateAugmenter(data_shape=(3, 300, 300), rand_mirror=True,\n ... mean=True, brightness=0.125, contrast=0.125, rand_gray=0.05,\n ... saturation=0.125, pca_noise=0.05, inter_method=10)\n >>> # dump the details\n >>> for aug in augs:\n ... aug.dumps()\n \"\"\"\n auglist = []\n\n if resize > 0:\n auglist.append(ResizeAug(resize, inter_method))\n\n crop_size = (data_shape[2], data_shape[1])\n if rand_resize:\n assert rand_crop\n auglist.append(RandomSizedCropAug(crop_size, 0.08, (3.0 / 4.0, 4.0 / 3.0), inter_method))\n elif rand_crop:\n auglist.append(RandomCropAug(crop_size, inter_method))\n else:\n auglist.append(CenterCropAug(crop_size, inter_method))\n\n if rand_mirror:\n auglist.append(HorizontalFlipAug(0.5))\n\n auglist.append(CastAug())\n\n if brightness or contrast or saturation:\n auglist.append(ColorJitterAug(brightness, contrast, saturation))\n\n if hue:\n auglist.append(HueJitterAug(hue))\n\n if pca_noise > 0:\n eigval = np.array([55.46, 4.794, 1.148])\n eigvec = np.array([[-0.5675, 0.7192, 0.4009],\n [-0.5808, -0.0045, -0.8140],\n [-0.5836, -0.6948, 0.4203]])\n auglist.append(LightingAug(pca_noise, eigval, eigvec))\n\n if rand_gray > 0:\n auglist.append(RandomGrayAug(rand_gray))\n\n if mean is True:\n mean = nd.array([123.68, 116.28, 103.53])\n elif mean is not None:\n assert isinstance(mean, (np.ndarray, nd.NDArray)) 
and mean.shape[0] in [1, 3]\n\n if std is True:\n std = nd.array([58.395, 57.12, 57.375])\n elif std is not None:\n assert isinstance(std, (np.ndarray, nd.NDArray)) and std.shape[0] in [1, 3]\n\n if mean is not None or std is not None:\n auglist.append(ColorNormalizeAug(mean, std))\n\n return auglist\n\n\nclass ImageIter(io.DataIter):\n \"\"\"Image data iterator with a large number of augmentation choices.\n This iterator supports reading from both .rec files and raw image files.\n\n To load input images from .rec files, use `path_imgrec` parameter and to load from raw image\n files, use `path_imglist` and `path_root` parameters.\n\n To use data partition (for distributed training) or shuffling, specify `path_imgidx` parameter.\n\n Parameters\n ----------\n batch_size : int\n Number of examples per batch.\n data_shape : tuple\n Data shape in (channels, height, width) format.\n For now, only RGB image with 3 channels is supported.\n label_width : int, optional\n Number of labels per example. The default label width is 1.\n path_imgrec : str\n Path to image record file (.rec).\n Created with tools/im2rec.py or bin/im2rec.\n path_imglist : str\n Path to image list (.lst).\n Created with tools/im2rec.py or with custom script.\n Format: Tab separated record of index, one or more labels and relative_path_from_root.\n imglist: list\n A list of images with the label(s).\n Each item is a list [imagelabel: float or list of float, imgpath].\n path_root : str\n Root folder of image files.\n path_imgidx : str\n Path to image index file. Needed for partition and shuffling when using .rec source.\n shuffle : bool\n Whether to shuffle all images at the start of each iteration or not.\n Can be slow for HDD.\n part_index : int\n Partition index.\n num_parts : int\n Total number of partitions.\n data_name : str\n Data name for provided symbols.\n label_name : str\n Label name for provided symbols.\n dtype : str\n Label data type. Default: float32. 
Other options: int32, int64, float64\n last_batch_handle : str, optional\n How to handle the last batch.\n This parameter can be 'pad'(default), 'discard' or 'roll_over'.\n If 'pad', the last batch will be padded with data starting from the begining\n If 'discard', the last batch will be discarded\n If 'roll_over', the remaining elements will be rolled over to the next iteration\n kwargs : ...\n More arguments for creating augmenter. See mx.image.CreateAugmenter.\n \"\"\"\n\n def __init__(self, batch_size, data_shape, label_width=1,\n path_imgrec=None, path_imglist=None, path_root=None, path_imgidx=None,\n shuffle=False, part_index=0, num_parts=1, aug_list=None, imglist=None,\n data_name='data', label_name='softmax_label', dtype='float32',\n last_batch_handle='pad', **kwargs):\n super(ImageIter, self).__init__()\n assert path_imgrec or path_imglist or (isinstance(imglist, list))\n assert dtype in ['int32', 'float32', 'int64', 'float64'], dtype + ' label not supported'\n num_threads = os.environ.get('MXNET_CPU_WORKER_NTHREADS', 1)\n logging.info('Using %s threads for decoding...', str(num_threads))\n logging.info('Set enviroment variable MXNET_CPU_WORKER_NTHREADS to a'\n ' larger number to use more threads.')\n class_name = self.__class__.__name__\n if path_imgrec:\n logging.info('%s: loading recordio %s...',\n class_name, path_imgrec)\n if path_imgidx:\n self.imgrec = recordio.MXIndexedRecordIO(path_imgidx, path_imgrec, 'r') # pylint: disable=redefined-variable-type\n self.imgidx = list(self.imgrec.keys)\n else:\n self.imgrec = recordio.MXRecordIO(path_imgrec, 'r') # pylint: disable=redefined-variable-type\n self.imgidx = None\n else:\n self.imgrec = None\n\n if path_imglist:\n logging.info('%s: loading image list %s...', class_name, path_imglist)\n with open(path_imglist) as fin:\n imglist = {}\n imgkeys = []\n for line in iter(fin.readline, ''):\n line = line.strip().split('\\t')\n label = nd.array(line[1:-1], dtype=dtype)\n key = int(line[0])\n imglist[key] = 
(label, line[-1])\n imgkeys.append(key)\n self.imglist = imglist\n elif isinstance(imglist, list):\n logging.info('%s: loading image list...', class_name)\n result = {}\n imgkeys = []\n index = 1\n for img in imglist:\n key = str(index) # pylint: disable=redefined-variable-type\n index += 1\n if len(img) > 2:\n label = nd.array(img[:-1], dtype=dtype)\n elif isinstance(img[0], numeric_types):\n label = nd.array([img[0]], dtype=dtype)\n else:\n label = nd.array(img[0], dtype=dtype)\n result[key] = (label, img[-1])\n imgkeys.append(str(key))\n self.imglist = result\n else:\n self.imglist = None\n self.path_root = path_root\n\n self.check_data_shape(data_shape)\n self.provide_data = [(data_name, (batch_size,) + data_shape)]\n if label_width > 1:\n self.provide_label = [(label_name, (batch_size, label_width))]\n else:\n self.provide_label = [(label_name, (batch_size,))]\n self.batch_size = batch_size\n self.data_shape = data_shape\n self.label_width = label_width\n self.shuffle = shuffle\n if self.imgrec is None:\n self.seq = imgkeys\n elif shuffle or num_parts > 1 or path_imgidx:\n assert self.imgidx is not None\n self.seq = self.imgidx\n else:\n self.seq = None\n\n if num_parts > 1:\n assert part_index < num_parts\n N = len(self.seq)\n C = N // num_parts\n self.seq = self.seq[part_index * C:(part_index + 1) * C]\n if aug_list is None:\n self.auglist = CreateAugmenter(data_shape, **kwargs)\n else:\n self.auglist = aug_list\n self.cur = 0\n self._allow_read = True\n self.last_batch_handle = last_batch_handle\n self.num_image = len(self.seq) if self.seq is not None else None\n self._cache_data = None\n self._cache_label = None\n self._cache_idx = None\n self.reset()\n\n def reset(self):\n \"\"\"Resets the iterator to the beginning of the data.\"\"\"\n if self.seq is not None and self.shuffle:\n random.shuffle(self.seq)\n if self.last_batch_handle != 'roll_over' or \\\n self._cache_data is None:\n if self.imgrec is not None:\n self.imgrec.reset()\n self.cur = 0\n if 
self._allow_read is False:\n self._allow_read = True\n\n def hard_reset(self):\n \"\"\"Resets the iterator and ignore roll over data\"\"\"\n if self.seq is not None and self.shuffle:\n random.shuffle(self.seq)\n if self.imgrec is not None:\n self.imgrec.reset()\n self.cur = 0\n self._allow_read = True\n self._cache_data = None\n self._cache_label = None\n self._cache_idx = None\n\n def next_sample(self):\n \"\"\"Helper function for reading in next sample.\"\"\"\n if self._allow_read is False:\n raise StopIteration\n if self.seq is not None:\n if self.cur < self.num_image:\n idx = self.seq[self.cur]\n else:\n if self.last_batch_handle != 'discard':\n self.cur = 0\n raise StopIteration\n self.cur += 1\n if self.imgrec is not None:\n s = self.imgrec.read_idx(idx)\n header, img = recordio.unpack(s)\n if self.imglist is None:\n return header.label, img\n else:\n return self.imglist[idx][0], img\n else:\n label, fname = self.imglist[idx]\n return label, self.read_image(fname)\n else:\n s = self.imgrec.read()\n if s is None:\n if self.last_batch_handle != 'discard':\n self.imgrec.reset()\n raise StopIteration\n header, img = recordio.unpack(s)\n return header.label, img\n\n def _batchify(self, batch_data, batch_label, start=0):\n \"\"\"Helper function for batchifying data\"\"\"\n i = start\n batch_size = self.batch_size\n try:\n while i < batch_size:\n label, s = self.next_sample()\n data = self.imdecode(s)\n try:\n self.check_valid_image(data)\n except RuntimeError as e:\n logging.debug('Invalid image, skipping: %s', str(e))\n continue\n data = self.augmentation_transform(data)\n assert i < batch_size, 'Batch size must be multiples of augmenter output length'\n batch_data[i] = self.postprocess_data(data)\n batch_label[i] = label\n i += 1\n except StopIteration:\n if not i:\n raise StopIteration\n return i\n\n def next(self):\n \"\"\"Returns the next batch of data.\"\"\"\n batch_size = self.batch_size\n c, h, w = self.data_shape\n # if last batch data is rolled over\n if 
self._cache_data is not None:\n # check both the data and label have values\n assert self._cache_label is not None, \"_cache_label didn't have values\"\n assert self._cache_idx is not None, \"_cache_idx didn't have values\"\n batch_data = self._cache_data\n batch_label = self._cache_label\n i = self._cache_idx\n # clear the cache data\n else:\n batch_data = nd.zeros((batch_size, c, h, w))\n batch_label = nd.empty(self.provide_label[0][1])\n i = self._batchify(batch_data, batch_label)\n # calculate the padding\n pad = batch_size - i\n # handle padding for the last batch\n if pad != 0:\n if self.last_batch_handle == 'discard': # pylint: disable=no-else-raise\n raise StopIteration\n # if the option is 'roll_over', throw StopIteration and cache the data\n elif self.last_batch_handle == 'roll_over' and \\\n self._cache_data is None:\n self._cache_data = batch_data\n self._cache_label = batch_label\n self._cache_idx = i\n raise StopIteration\n else:\n _ = self._batchify(batch_data, batch_label, i)\n if self.last_batch_handle == 'pad':\n self._allow_read = False\n else:\n self._cache_data = None\n self._cache_label = None\n self._cache_idx = None\n\n return io.DataBatch([batch_data], [batch_label], pad=pad)\n\n def check_data_shape(self, data_shape):\n \"\"\"Checks if the input data shape is valid\"\"\"\n if not len(data_shape) == 3:\n raise ValueError('data_shape should have length 3, with dimensions CxHxW')\n if not data_shape[0] == 3:\n raise ValueError('This iterator expects inputs to have 3 channels.')\n\n def check_valid_image(self, data):\n \"\"\"Checks if the input data is valid\"\"\"\n if len(data[0].shape) == 0:\n raise RuntimeError('Data shape is wrong')\n\n def imdecode(self, s):\n \"\"\"Decodes a string or byte string to an NDArray.\n See mx.img.imdecode for more details.\"\"\"\n def locate():\n \"\"\"Locate the image file/index if decode fails.\"\"\"\n if self.seq is not None:\n idx = self.seq[(self.cur % self.num_image) - 1]\n else:\n idx = (self.cur % 
self.num_image) - 1\n if self.imglist is not None:\n _, fname = self.imglist[idx]\n msg = \"filename: {}\".format(fname)\n else:\n msg = \"index: {}\".format(idx)\n return \"Broken image \" + msg\n try:\n img = imdecode(s)\n except Exception as e:\n raise RuntimeError(\"{}, {}\".format(locate(), e))\n return img\n\n def read_image(self, fname):\n \"\"\"Reads an input image `fname` and returns the decoded raw bytes.\n Examples\n --------\n >>> dataIter.read_image('Face.jpg') # returns decoded raw bytes.\n \"\"\"\n with open(os.path.join(self.path_root, fname), 'rb') as fin:\n img = fin.read()\n return img\n\n def augmentation_transform(self, data):\n \"\"\"Transforms input data with specified augmentation.\"\"\"\n for aug in self.auglist:\n data = aug(data)\n return data\n\n def postprocess_data(self, datum):\n \"\"\"Final postprocessing step before image is loaded into the batch.\"\"\"\n return nd.transpose(datum, axes=(2, 0, 1))\n"
] |
[
[
"numpy.dot",
"numpy.log",
"numpy.sqrt",
"numpy.cos",
"numpy.sin",
"numpy.frombuffer",
"numpy.random.normal",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
xuyongzhi/SparseVoxelNet
|
[
"2b8338c3291880ee1ef7739580eeaefb737f6164",
"2b8338c3291880ee1ef7739580eeaefb737f6164"
] |
[
"datasets/all_datasets_meta/datasets_meta.py",
"datasets/graph_util.py"
] |
[
"# May 2018 xyz\n\nfrom __future__ import print_function\nimport os\nimport sys, glob\nimport numpy as np\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = os.path.dirname(BASE_DIR)\nsys.path.append(os.path.join(ROOT_DIR))\n\nDATASETS = ['MATTERPORT', 'SCANNET', 'ETH', 'MODELNET40']\nfor ds in DATASETS:\n sys.path.append('%s/%s_util'%(ROOT_DIR,ds))\n\nfrom MATTERPORT_util.MATTERPORT_util import MATTERPORT_Meta\nfrom SCANNET_util import SCANNET_Meta\nfrom ETH_util import ETH_Meta\nfrom MODELNET_util import MODELNET40_Meta\nDATASETS_Meta = [MATTERPORT_Meta, SCANNET_Meta, ETH_Meta, MODELNET40_Meta]\n\n\nclass DatasetsMeta():\n g_label2class = {}\n g_label_names = {}\n g_unlabelled_categories = {}\n g_easy_categories = {}\n g_label2color = {}\n g_bad_files = {}\n\n for i in range(len(DATASETS)):\n DS_i = DATASETS[i]\n DS_Meta_i = DATASETS_Meta[i]\n g_label2class[DS_i] = DS_Meta_i['label2class']\n g_label_names[DS_i] = DS_Meta_i['label_names']\n g_label2color[DS_i] = DS_Meta_i['label2color']\n g_easy_categories[DS_i] = DS_Meta_i['easy_categories']\n g_unlabelled_categories[DS_i] = DS_Meta_i['unlabelled_categories']\n if 'bad_files' in DS_Meta_i:\n g_bad_files[DS_i] = DS_Meta_i['bad_files']\n else:\n g_bad_files[DS_i] = []\n\n ##---------------------------------------------------------------------------\n\n #---------------------------------------------------------------------------\n g_label2class['STANFORD_INDOOR3D'] = \\\n {0:'ceiling', 1:'floor', 2:'wall', 3:'beam', 4:'column', 5:'window', 6:'door', 7:'table',\n 8:'chair', 9:'sofa', 10:'bookcase', 11:'board', 12:'clutter'}\n g_unlabelled_categories['STANFORD_INDOOR3D'] = [12]\n g_easy_categories['STANFORD_INDOOR3D'] = []\n g_label2color['STANFORD_INDOOR3D'] = \\\n {0:\t[0,0,0],1:\t[0,0,255],2:\t[0,255,255],3: [255,255,0],4: [255,0,255],10: [100,100,255],\n 6: [0,255,0],7: [170,120,200],8: [255,0,0],9: [200,100,100],5:[10,200,100],11:[200,200,200],12:[200,200,100]}\n\n 
#---------------------------------------------------------------------------\n g_label2class['KITTI'] = {0:'background', 1:'car', 2:'pedestrian', 3:'cyclist'} ## benz_m\n g_unlabelled_categories['KITTI'] = []\n g_label2color['KITTI'] = { 0:[0,0,0], 1:[0,0,255], 2:[0,255,255], 3:[255,255,0] } ## benz_m\n g_easy_categories['KITTI'] = []\n\n def __init__(self,datasource_name):\n self.datasource_name = datasource_name\n self.label2class = self.g_label2class[self.datasource_name]\n self.label_names = [self.label2class[l] for l in range(len(self.label2class))]\n self.label2color = self.g_label2color[self.datasource_name]\n self.class2label = {cls:label for label,cls in self.label2class.iteritems()}\n self.class2color = {}\n for i in self.label2class:\n cls = self.label2class[i]\n self.class2color[cls] = self.label2color[i]\n self.num_classes = len(self.label2class)\n #self.num_classes = len(self.g_label2class) - len(self.g_unlabelled_categories[self.datasource_name])\n\n self.bad_files = self.g_bad_files[datasource_name]\n\n def get_train_test_file_list(self, data_dir, is_training):\n if self.datasource_name == \"MODELNET40\":\n return self.get_train_test_file_list_MODELNET(data_dir, is_training)\n if self.datasource_name == \"MATTERPORT\":\n return self.get_train_test_file_list_MATTERPORT(data_dir, is_training)\n\n def get_train_test_file_list_MATTERPORT(self, data_dir, is_training):\n from MATTERPORT_util.MATTERPORT_util import benchmark\n tte_scene_names = benchmark()\n split = 'train' if is_training else 'test'\n scene_names = tte_scene_names[split]\n tte_fnum = {}\n tte_fnum['train'] = 1554\n tte_fnum['test'] = 406\n tte_fnum['val'] = 234\n\n all_fns = glob.glob(os.path.join(data_dir, '*.tfrecord'))\n #assert len(all_fns) == 2194, len(all_fns)\n def sence_name(fn):\n return os.path.basename(fn).split('_')[0]\n the_fns = [e for e in all_fns if sence_name(e) in scene_names]\n #assert len(the_fns) == tte_fnum[split]\n return the_fns\n\n def 
get_train_test_file_list_MODELNET(self, data_dir, is_training):\n from MODELNET_util import train_names, test_names\n if is_training:\n train_names = train_names()\n train_fns = [os.path.join(data_dir, e+'.tfrecord') for e in train_names]\n # Check exist\n for e in train_fns[0:len(train_fns):100]:\n assert( os.path.exists(e) )\n assert len(train_fns) == 9843\n\n return train_fns\n\n else:\n test_names = test_names()\n test_fns = [os.path.join(data_dir, e+'.tfrecord') for e in test_names]\n for e in test_fns[0:len(test_fns):10]:\n assert( os.path.exists(e) )\n assert len(test_fns) == 2468\n\n return test_fns\n\n\n\n\ndef show_all_colors( datasource_name ):\n from PIL import Image\n dset_meta = DatasetsMeta(datasource_name)\n label2color = dset_meta.label2color\n label2class = dset_meta.label2class\n path = os.path.join( BASE_DIR,'label_colors_'+datasource_name )\n if not os.path.exists(path):\n os.makedirs(path)\n for label,color in label2color.iteritems():\n if label < len( label2class ):\n cls = label2class[label]\n else:\n cls = 'empty'\n data = np.zeros((512,512,3),dtype=np.uint8)\n color_ = np.array(color,dtype=np.uint8)\n data += color_\n img = Image.fromarray(data,'RGB')\n fn = path+'/'+str(label)+'_'+cls+'.png'\n img.save(fn)\n print(fn)\n img.show()\n\nif __name__ == '__main__':\n show_all_colors('MATTERPORT')\n\n",
"# Sep 18\n\nimport h5py, os, glob,sys\nimport numpy as np\nimport tensorflow as tf\nimport math\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = os.path.dirname(BASE_DIR)\nsys.path.append(BASE_DIR)\nsys.path.append(ROOT_DIR)\n\nfrom datasets.all_datasets_meta.datasets_meta import DatasetsMeta\nimport utils.ply_util as ply_util\nfrom utils.tf_util import TfUtil\n\nMAX_FLOAT_DRIFT = 1e-6\nDEBUG = False\n\n\n\ndef tsize(tensor):\n return len(get_tensor_shape(tensor))\n\ndef get_tensor_shape(t):\n return TfUtil.get_tensor_shape(t)\n\n\nclass MeshSampling():\n _full_edge_dis = 3\n _max_norm_dif_angle = 15.0\n _check_optial = True\n\n _edgev_num = 14\n\n _max_nf_perv = 9\n\n _vertex_eles = ['color', 'xyz', 'nxnynz', 'fidx_per_vertex', 'edgev_per_vertex', 'valid_ev_num_pv',\\\n 'edges_per_vertex', 'edges_pv_empty_mask', 'fidx_pv_empty_mask',\\\n 'same_normal_mask', 'same_category_mask',\\\n 'label_category', 'label_instance', 'label_material']\n _face_eles = ['label_raw_category', 'label_instance', 'label_material', \\\n 'label_category', 'vidx_per_face', ]\n\n _fi = 0\n _only_vertex = True\n\n @staticmethod\n def sess_split_sampling_rawmesh(raw_datas, _num_vertex_sp, splited_vidx,\n dset_metas, parse_local_graph_pv, ply_dir):\n raw_vertex_nums = [e.shape[0] if type(e)!=type(None) else raw_datas['xyz'].shape[0]\\\n for e in splited_vidx]\n with tf.Graph().as_default():\n #with tf.device('/device:GPU:0'):\n with tf.device('/CPU:0'):\n raw_datas_pl = {}\n for item in raw_datas:\n type_i = eval( 'tf.' 
+ str(raw_datas[item].dtype) )\n shape_i = raw_datas[item].shape\n raw_datas_pl[item] = tf.placeholder(type_i, shape_i, item+'_pl')\n block_num = len(splited_vidx)\n splited_vidx_pl = []\n if block_num==1:\n splited_vidx_pl_ = [None]\n else:\n for bi in range(block_num):\n splited_vidx_pl.append( tf.placeholder(tf.int32, splited_vidx[bi].shape,\n 'splited_vidx_%d_pl'%(bi)) )\n splited_vidx_pl_ = [tf.identity(e) for e in splited_vidx_pl]\n\n mesh_summary_ = {}\n splited_sampled_datas_ = MeshSampling.main_split_sampling_rawmesh(\\\n raw_datas_pl.copy(), _num_vertex_sp, splited_vidx_pl_,\n dset_metas, parse_local_graph_pv, ply_dir, mesh_summary_)\n\n config=tf.ConfigProto(allow_soft_placement=True,\n device_count={\"CPU\": 8},\n inter_op_parallelism_threads=6,\n intra_op_parallelism_threads=6)\n config.gpu_options.allow_growth = True\n with tf.Session(config=config) as sess:\n feed_dict = {}\n for item in raw_datas:\n feed_dict[raw_datas_pl[item]] = raw_datas[item]\n\n if block_num>1:\n for bi in range(block_num):\n feed_dict[splited_vidx_pl[bi]] = splited_vidx[bi]\n\n splited_sampled_datas = sess.run(splited_sampled_datas_, feed_dict=feed_dict)\n\n return splited_sampled_datas, raw_vertex_nums, {}\n\n\n @staticmethod\n def eager_split_sampling_rawmesh(raw_datas, _num_vertex_sp, splited_vidx,\n dset_metas, parse_local_graph_pv,\n ply_dir=None):\n start = MeshSampling._fi == 0\n MeshSampling._fi += 1\n if start:\n tf.enable_eager_execution()\n\n raw_vertex_nums = [e.shape[0] if type(e)!=type(None) else raw_datas['xyz'].shape[0]\\\n for e in splited_vidx]\n mesh_summary = {}\n splited_sampled_datas = MeshSampling.main_split_sampling_rawmesh(\n raw_datas, _num_vertex_sp, splited_vidx, dset_metas,\n parse_local_graph_pv,\n ply_dir=ply_dir, mesh_summary=mesh_summary)\n\n bn = len(splited_sampled_datas)\n for bi in range(bn):\n for item in splited_sampled_datas[bi]:\n if isinstance(splited_sampled_datas[bi][item], tf.Tensor):\n splited_sampled_datas[bi][item] = 
splited_sampled_datas[bi][item].numpy()\n for key in mesh_summary:\n if isinstance(mesh_summary[key], tf.Tensor):\n mesh_summary[key] = mesh_summary[key].numpy()\n return splited_sampled_datas, raw_vertex_nums, mesh_summary\n\n\n @staticmethod\n def main_split_sampling_rawmesh(raw_datas, _num_vertex_sp, splited_vidx,\n dset_metas, parse_local_graph_pv,\n ply_dir=None, mesh_summary={}):\n\n is_show_shapes = False\n IsGenply_Raw = False\n IsGenply_Cleaned = False\n IsGenply_SameMask = False\n IsGenply_Splited = False\n IsGenply_SplitedSampled = False\n\n if IsGenply_Raw:\n GenPlys.gen_mesh_ply_basic(raw_datas, 'Raw', 'raw', ply_dir)\n t_start = tf.timestamp()\n #***************************************************************************\n # rm some labels\n with tf.variable_scope('rm_some_labels'):\n raw_datas, splited_vidx = MeshSampling.rm_some_labels(raw_datas, dset_metas, splited_vidx)\n if IsGenply_Cleaned:\n GenPlys.gen_mesh_ply_basic(raw_datas, 'Cleaned', 'Cleaned', ply_dir)\n # check not all void vertices\n valid_num_vertex = tf.shape(raw_datas['xyz'])[0]\n check_enough_nonvoid = tf.assert_greater(valid_num_vertex, 1000,\n message=\"not enough nonvoid vertex: {}. 
Add to bad file list\".format(\n valid_num_vertex))\n with tf.control_dependencies([check_enough_nonvoid]):\n raw_datas['xyz'] = tf.identity(raw_datas['xyz'])\n #***************************************************************************\n if parse_local_graph_pv:\n #face idx per vetex, edges per vertyex\n num_vertex0 = TfUtil.tshape0(raw_datas['xyz'])\n fidx_per_vertex, fidx_pv_empty_mask, edgev_per_vertex, valid_ev_num_pv, \\\n edges_per_vertex, edges_pv_empty_mask, lonely_vertex_idx = \\\n MeshSampling.get_fidx_nbrv_per_vertex(\n raw_datas['vidx_per_face'], num_vertex0, xyz=raw_datas['xyz'],\n norm = raw_datas['nxnynz'], mesh_summary=mesh_summary)\n raw_datas['fidx_per_vertex'] = fidx_per_vertex\n raw_datas['fidx_pv_empty_mask'] = fidx_pv_empty_mask\n raw_datas['edgev_per_vertex'] = edgev_per_vertex\n raw_datas['valid_ev_num_pv'] = valid_ev_num_pv\n raw_datas['edges_per_vertex'] = edges_per_vertex\n raw_datas['edges_pv_empty_mask'] = edges_pv_empty_mask\n\n if is_show_shapes:\n MeshSampling.show_datas_shape(raw_datas, 'raw datas')\n\n #***************************************************************************\n parse_same_mask = False\n if parse_same_mask:\n # same mask\n same_normal_mask, same_category_mask = MeshSampling.get_simplicity_label(\n fidx_per_vertex, fidx_pv_empty_mask,\n edges_per_vertex, edges_pv_empty_mask,\n raw_datas['nxnynz'],\n raw_datas['label_category'],\n raw_datas['label_instance'])\n same_norm_cat_mask = (same_normal_mask + same_category_mask) / 2\n\n raw_datas['same_normal_mask'] = tf.expand_dims(same_normal_mask,1)\n raw_datas['same_category_mask'] = tf.expand_dims(same_category_mask,1)\n\n if IsGenply_SameMask:\n MeshSampling.gen_ply_raw(raw_datas, same_normal_mask,\n same_category_mask, same_norm_cat_mask, ply_dir)\n\n #***************************************************************************\n # split mesh\n block_num = len(splited_vidx)\n if block_num==1:\n splited_datas = [raw_datas]\n else:\n with 
tf.variable_scope('split_vertex'):\n splited_datas = MeshSampling.split_vertex(raw_datas, splited_vidx, mesh_summary)\n\n if IsGenply_Splited:\n for bi in range(block_num):\n GenPlys.gen_mesh_ply_basic(splited_datas[bi], 'Splited' ,'Block_{}'.format(bi), ply_dir)\n #***************************************************************************\n # sampling\n for bi in range(block_num):\n with tf.variable_scope('sampling_mesh'):\n splited_datas[bi] = MeshSampling.sampling_mesh(\n _num_vertex_sp, splited_datas[bi], mesh_summary)\n splited_sampled_datas = splited_datas\n mesh_summary['t'] = tf.timestamp() - t_start\n\n if IsGenply_SplitedSampled:\n for bi in range(block_num):\n GenPlys.gen_mesh_ply_basic(splited_sampled_datas[bi], 'SplitedSampled',\n 'Block{}_sampled_{}'.format(bi, _num_vertex_sp), ply_dir, gen_edgev=True)\n\n if is_show_shapes:\n MeshSampling.show_datas_shape(splited_sampled_datas, 'sampled datas')\n\n return splited_sampled_datas\n\n\n @staticmethod\n def split_vertex(raw_datas, splited_vidx, mesh_summary):\n num_vertex0 = TfUtil.tshape0(raw_datas['xyz'])\n\n # get splited_fidx\n bn = len(splited_vidx)\n splited_fidx = []\n vidx_per_face_new_ls = []\n edgev_per_vertex_new_ls = []\n valid_ev_num_pv_new_ls = []\n\n if 'vidx_per_face' not in raw_datas or raw_datas['vidx_per_face'] is None:\n splited_fidx = vidx_per_face_new_ls = edgev_per_vertex_new_ls =\\\n valid_ev_num_pv_new_ls = [None]*bn\n\n else:\n for bi,block_vidx in enumerate(splited_vidx):\n with tf.variable_scope('spv_dsf_b%d'%(bi)):\n if 'edgev_per_vertex' in raw_datas:\n edgev_per_vertex = raw_datas['edgev_per_vertex']\n valid_ev_num_pv = raw_datas['valid_ev_num_pv']\n else:\n edgev_per_vertex = None\n valid_ev_num_pv = None\n\n face_sp_indices, vidx_per_face_new, edgev_per_vertex_new, valid_ev_num_pv_new \\\n = MeshSampling.update_face_edgev(\\\n block_vidx, num_vertex0, raw_datas['vidx_per_face'],\n edgev_per_vertex, valid_ev_num_pv, raw_datas['xyz'], mesh_summary)\n 
splited_fidx.append(face_sp_indices)\n vidx_per_face_new_ls.append(vidx_per_face_new)\n edgev_per_vertex_new_ls.append(edgev_per_vertex_new)\n valid_ev_num_pv_new_ls.append(valid_ev_num_pv_new)\n\n # do split\n splited_datas = []\n for bi,block_vidx in enumerate(splited_vidx):\n block_datas = MeshSampling.gather_datas(raw_datas, block_vidx,\n splited_fidx[bi], vidx_per_face_new_ls[bi],\n edgev_per_vertex_new_ls[bi], valid_ev_num_pv_new_ls[bi])\n splited_datas.append(block_datas)\n #MeshSampling.show_datas_shape(block_datas, 'block %d'%(bi))\n return splited_datas\n\n @staticmethod\n def split_face(splited_datas):\n block\n\n @staticmethod\n def show_datas_shape(datas, flag=''):\n shape_strs = '\\n{}\\n'.format(flag)\n def show_one_item(item):\n if item not in datas:\n return ''\n di = datas[item]\n if isinstance(di, tf.Tensor):\n shape_str = di.shape.as_list()\n else:\n shape_str = str(di.shape)\n shape_str = '\\t{}: {}\\n'.format(item, shape_str)\n return shape_str\n\n for item in MeshSampling._vertex_eles:\n shape_strs += show_one_item(item)\n shape_strs += '\\n'\n for item in MeshSampling._face_eles:\n shape_strs += show_one_item(item)\n print(shape_strs)\n return shape_strs\n\n @staticmethod\n def get_fidx_nbrv_per_vertex(vidx_per_face, num_vertex0, xyz=None, norm=None, mesh_summary={}):\n '''\n Inputs: [F,3] []\n Output: [N, ?]\n '''\n num_face = tf.shape(vidx_per_face)[0]\n face_indices = tf.reshape(tf.range(0, num_face), [-1,1,1])\n face_indices = tf.tile(face_indices, [1, 3,1])\n vidx_per_face = tf.expand_dims(vidx_per_face, 2)\n vidx_fidx = tf.concat([vidx_per_face, face_indices], 2)\n vidx_fidx = tf.reshape(vidx_fidx, [-1, 2])\n\n #***************************************************************************\n # shuffle before sort if need to sample later\n\n # sort by vidx. 
Put all fidx belong to same vertex together\n sort_indices = tf.contrib.framework.argsort(vidx_fidx[:,0])\n vidx_fidx_flat_sorted = tf.gather(vidx_fidx, sort_indices)\n\n #***************************************************************************\n # get unique indices\n vidx_unique, vidx_perf, nface_per_v = tf.unique_with_counts(vidx_fidx_flat_sorted[:,0])\n check_vertex_num = tf.assert_equal(vidx_unique[-1]+1, num_vertex0,\n message=\"num_vertex incorrect\")\n with tf.control_dependencies([check_vertex_num]):\n nface_per_v = tf.identity(nface_per_v)\n max_nf_perv = tf.reduce_max(nface_per_v)\n max_nf_perv = tf.maximum(max_nf_perv, MeshSampling._max_nf_perv)\n mean_nf_perv = tf.reduce_mean(nface_per_v)\n min_nf_perv = tf.reduce_min(nface_per_v)\n\n #***************************************************************************\n # get face idx per vertex flat\n nface_cumsum0 = tf.cumsum(nface_per_v)[0:-1]\n nface_cumsum0 = tf.concat([tf.constant([0], tf.int32), nface_cumsum0], 0)\n nface_cumsum1 = tf.gather(nface_cumsum0, vidx_perf)\n auged_vidx = tf.range(tf.shape(vidx_fidx_flat_sorted)[0])\n fidx_per_v_flat = tf.expand_dims(auged_vidx - nface_cumsum1, 1)\n\n #***************************************************************************\n # reshape\n vidx_fidxperv = tf.concat([vidx_fidx_flat_sorted[:,0:1], fidx_per_v_flat], 1)\n\n fidx_per_vertex = tf.scatter_nd(vidx_fidxperv, vidx_fidx_flat_sorted[:,1]+1, \\\n [num_vertex0, max_nf_perv]) - 1\n fidx_pv_empty_mask = tf.equal(fidx_per_vertex, -1)\n\n #show_ave_num_face_perv\n num_face_perv = tf.reduce_sum(tf.cast(fidx_pv_empty_mask, tf.int32), -1)\n ave_num_face_perv = tf.reduce_mean(num_face_perv)\n mesh_summary['ave_num_face_perv'] = ave_num_face_perv\n fidx_per_vertex = tf.Print(fidx_per_vertex, [ave_num_face_perv],\n \"\\nave_num_face_perv: \")\n\n # fix size\n fidx_per_vertex = fidx_per_vertex[:, 0:MeshSampling._max_nf_perv]\n nv0 = num_vertex0 if not isinstance(num_vertex0, tf.Tensor) else None\n 
fidx_per_vertex.set_shape([nv0, MeshSampling._max_nf_perv])\n fidx_pv_empty_mask = fidx_pv_empty_mask[:, 0:MeshSampling._max_nf_perv]\n fidx_pv_empty_mask.set_shape([nv0, MeshSampling._max_nf_perv])\n\n #***************************************************************************\n # check is there any vertex belong to no faces\n lonely_vertex_mask = tf.equal(fidx_per_vertex[:,0], -1)\n any_lonely_vertex = tf.reduce_any(lonely_vertex_mask)\n\n lonely_vertex_idx0 = tf.squeeze(tf.cast(tf.where(lonely_vertex_mask), tf.int32),1)\n lonely_vertex_idx = tf.cond(any_lonely_vertex, lambda: lonely_vertex_idx0,\n lambda: tf.zeros([], tf.int32))\n\n\n # set -1 as the first one\n empty_indices = tf.cast(tf.where(fidx_pv_empty_mask), tf.int32)\n the_first_face_dix = tf.gather(fidx_per_vertex[:,0], empty_indices[:,0],axis=0)\n tmp = tf.scatter_nd(empty_indices, the_first_face_dix+1, tf.shape(fidx_per_vertex))\n fidx_per_vertex = fidx_per_vertex + tmp\n\n\n #***************************************************************************\n # get neighbor verties\n edges_per_vertexs_flat = tf.gather(tf.squeeze(vidx_per_face,-1), vidx_fidx_flat_sorted[:,1])\n\n # remove self vertex\n self_mask = tf.equal(edges_per_vertexs_flat, vidx_fidx_flat_sorted[:,0:1])\n self_mask = tf.cast(self_mask, tf.int32)\n sort_indices = tf.contrib.framework.argsort(self_mask, axis=-1)\n sort_indices = tf.expand_dims(sort_indices, 2)\n tmp0 = tf.reshape(tf.range(tf.shape(sort_indices)[0]), [-1,1,1])\n tmp0 = tf.tile(tmp0, [1,3,1])\n sort_indices = tf.concat([tmp0, sort_indices], 2)\n\n edges_per_vertexs_flat = tf.gather_nd(edges_per_vertexs_flat, sort_indices)\n edges_per_vertexs_flat = edges_per_vertexs_flat[:,0:2]\n\n # reshape edges_per_vertexs_flat\n edges_per_vertex = tf.scatter_nd(vidx_fidxperv, edges_per_vertexs_flat+1,\\\n [num_vertex0, max_nf_perv, 2])-1\n\n\n #***************************************************************************\n # sort edge vertices by path\n edgev_sort_method = 
'geodesic_angle'\n if edgev_sort_method == 'geodesic_angle':\n edgev_per_vertex, valid_ev_num_pv = EdgeVPath.sort_edgev_by_angle(edges_per_vertex, xyz, norm, cycle_idx=True, max_evnum_next=MeshSampling._edgev_num, geodis=1)\n #EdgeVPath.main_test_expand_path(edgev_per_vertex, valid_ev_num_pv, xyz, norm)\n elif edgev_sort_method == 'path':\n edgev_per_vertex, valid_ev_num_pv, close_flag = MeshSampling.sort_edge_vertices(edges_per_vertex)\n\n valid_ev_num_ave = tf.reduce_mean(valid_ev_num_pv)\n valid_ev_num_max = tf.reduce_max(valid_ev_num_pv)\n #close_num = tf.reduce_sum(tf.cast(tf.equal(close_flag, 1), tf.int32))\n mesh_summary['valid_edgev_num_ave'] = valid_ev_num_ave\n mesh_summary['valid_edgev_num_max'] = valid_ev_num_max\n edgev_per_vertex = tf.Print(edgev_per_vertex, [valid_ev_num_ave, valid_ev_num_max], message=\"ave max valid_ev_num\")\n\n #***************************************************************************\n # fixed shape of unsorted edges_per_vertex\n edges_per_vertex = edges_per_vertex[:, 0:MeshSampling._max_nf_perv,:]\n nv0 = num_vertex0 if not isinstance(num_vertex0, tf.Tensor) else None\n edges_per_vertex.set_shape([nv0, MeshSampling._max_nf_perv, 2])\n edges_pv_empty_mask = tf.cast(tf.equal(edges_per_vertex, -1), tf.bool)\n\n # set -1 as the first one\n the_first_edges_dix = tf.gather(edges_per_vertex[:,0,:], empty_indices[:,0])\n tmp = tf.scatter_nd(empty_indices, the_first_edges_dix+1, tf.shape(edges_per_vertex))\n edges_per_vertex += tmp\n\n # reshape and flat to the same dims with other elements, to store in the\n # same array\n edges_per_vertex = tf.reshape(edges_per_vertex, [num_vertex0, MeshSampling._max_nf_perv*2])\n edges_pv_empty_mask = tf.reshape(edges_pv_empty_mask, [num_vertex0, MeshSampling._max_nf_perv*2])\n\n #***********************\n # set face idx for the lonely vertex as 0, just avoid grammer error. 
but should not be used\n fidx_per_vertex += tf.cast(tf.expand_dims( lonely_vertex_mask, 1), tf.int32)\n edges_per_vertex += tf.cast(tf.expand_dims( lonely_vertex_mask, 1), tf.int32)\n\n return fidx_per_vertex, fidx_pv_empty_mask, edgev_per_vertex, valid_ev_num_pv,\\\n edges_per_vertex, edges_pv_empty_mask, lonely_vertex_idx\n\n\n @staticmethod\n def find_next_vertex(edgev_per_vertex, remain_edges_pv, valid_ev_num_pv, e, round_id):\n reshape = get_tensor_shape(remain_edges_pv)\n vertex_num = reshape[0]\n remain_vnum = reshape[1]\n remain_edge_num = remain_vnum/2\n\n last_v = edgev_per_vertex[:,-1:]\n same_mask = tf.equal(last_v, remain_edges_pv)\n remain_valid_mask = tf.greater_equal(remain_edges_pv, 0)\n same_mask = tf.logical_and(same_mask, remain_valid_mask)\n\n # There should be at most one vertex matches\n same_nums = tf.reduce_sum(tf.cast(same_mask, tf.int32), -1)\n max_same_num = tf.reduce_max(same_nums)\n if round_id ==1:\n def check_one_component():\n # some vertex are lost: more than one component\n not_finished_mask = tf.reduce_any(tf.greater(remain_edges_pv,-1),1)\n not_finished_idx = tf.squeeze(tf.cast(tf.where(not_finished_mask), tf.int32),1)\n more_component = tf.shape(not_finished_idx)[0]\n check_failed = tf.assert_less(more_component, 5)\n #with tf.control_dependencies([check_failed]):\n # same_mask = tf.identity(same_mask)\n #same_mask = tf.Print(same_mask, [more_component], message=\"more than one_component\")\n return more_component\n\n tf.cond(tf.not_equal(max_same_num,1), check_one_component, lambda : 0)\n\n # get the next vertex idx along the path\n same_edge_idx_pv = tf.cast(tf.where(same_mask), tf.int32)\n tmp = 1 - 2 * tf.mod(same_edge_idx_pv[:,1:2], 2)\n tmp = tf.concat([tf.zeros(tf.shape(tmp), tf.int32), tmp], -1)\n next_vid_in_e = same_edge_idx_pv + tmp\n\n # open vertex: cannot find the next one\n open_vertex_mask = tf.equal(same_nums, 0)\n last_valid_mask = tf.equal(e+1, valid_ev_num_pv)\n open_vertex_mask = 
tf.logical_and(open_vertex_mask, last_valid_mask)\n open_vidx = tf.squeeze(tf.cast(tf.where(open_vertex_mask), tf.int32),1)\n open_edge_num = tf.reduce_sum(tf.cast(open_vertex_mask, tf.int32))\n #print('{} edge, {} round open_edge_num:{}'.format(e, round_id, open_edge_num))\n\n # gen the mask to disable next edge\n edge_idx = same_edge_idx_pv[:,1:2]/2\n edge_idx = tf.concat([same_edge_idx_pv[:,0:1], edge_idx], 1)\n next_edge_mask = tf.scatter_nd(edge_idx, tf.ones(tf.shape(edge_idx)[0], tf.int32), [vertex_num, remain_edge_num])\n next_vertex_mask = tf.reshape(tf.tile(tf.expand_dims(next_edge_mask, -1), [1,1,2]), [vertex_num, reshape[1]])\n return next_vid_in_e, open_vidx, next_vertex_mask, open_vertex_mask\n\n @staticmethod\n def sort_edge_vertices(edges_per_vertex):\n '''\n get edgev_per_vertex: edge vertices sorted by path\n '''\n eshape = get_tensor_shape(edges_per_vertex)\n assert len(eshape) == 3\n vertex_num = eshape[0]\n edge_num = eshape[1]\n edges_per_vertex = tf.reshape(edges_per_vertex, [vertex_num, -1])\n\n def sort_one_edge(e, edgev_per_vertex, remain_edges_pv, loop_vid_in_e_start, valid_ev_num_pv, close_flag):\n next_vid_in_e, open_vidx, next_vertex_mask, open_vertex_mask = MeshSampling.find_next_vertex(\n edgev_per_vertex, remain_edges_pv, valid_ev_num_pv, e, 1)\n\n second_round = True\n if second_round:\n edgev_pv_open = tf.gather(edgev_per_vertex, open_vidx)\n #inverse edge vertice to find next vertex\n edgev_pv_open = tf.reverse(edgev_pv_open, [1])\n remain_edges_pv_open = tf.gather(remain_edges_pv, open_vidx)\n valid_ev_num_pv_open = tf.gather(valid_ev_num_pv, open_vidx)\n next_vid_in_e_2, open_vidx_2, next_vertex_mask_2, open_vertex_mask_2 = MeshSampling.find_next_vertex(\n edgev_pv_open, remain_edges_pv_open, valid_ev_num_pv_open, e, 2)\n\n # update close flag for second round open vertex\n open_v_2 = tf.gather(edgev_pv_open, open_vidx_2)\n is_close = tf.cast(tf.equal(open_v_2[:,0], open_v_2[:,-1]), tf.int32)\n is_close = is_close * 2 -1\n 
open_vidx_2 = tf.expand_dims(tf.gather(open_vidx, open_vidx_2),1)\n new_close_flag = tf.scatter_nd(open_vidx_2, is_close, [vertex_num])\n close_flag += new_close_flag\n loop_vid_in_e_start += tf.maximum(new_close_flag,0)\n\n # if it is still open, should reach the end, just leave it and set -1 in\n # next_vidx\n tmp = tf.gather(open_vidx, next_vid_in_e_2[:,0:1])\n next_vid_in_e_2 = tf.concat([tmp, next_vid_in_e_2[:,1:2]], 1)\n next_vid_in_e = tf.concat([next_vid_in_e, next_vid_in_e_2], 0)\n\n # gather the raw vertex in the scene\n next_vertex_idx = tf.gather_nd(remain_edges_pv, next_vid_in_e)\n next_vidx = tf.scatter_nd(next_vid_in_e[:,0:1], next_vertex_idx+1, [vertex_num])-1\n next_vidx = tf.expand_dims(next_vidx, 1)\n\n # update valid_ev_num_pv\n add_valid = tf.scatter_nd(next_vid_in_e[:,0:1], tf.ones(tf.shape(next_vid_in_e)[0:1], tf.int32), [vertex_num])\n valid_ev_num_pv += add_valid\n\n\n # update edgev_per_vertex\n if second_round:\n edgev_per_vertex_reversed = tf.scatter_nd(tf.expand_dims(open_vidx,-1), edgev_pv_open,\n [vertex_num, get_tensor_shape(edgev_pv_open)[1]])\n edgev_per_vertex *= 1-tf.cast(tf.expand_dims(open_vertex_mask,1), tf.int32)\n edgev_per_vertex += edgev_per_vertex_reversed\n\n # if it reaches the end, loop the cycle\n is_loop_invalid = True\n if is_loop_invalid:\n need_loop_vidx = tf.where(tf.less(valid_ev_num_pv, e+2))\n next_vid_in_e_loop = tf.gather(loop_vid_in_e_start, need_loop_vidx[:,0])\n tmp = tf.SparseTensor(need_loop_vidx, tf.ones(tf.shape(need_loop_vidx)[0:1], tf.int32), tf.cast(tf.shape(loop_vid_in_e_start),tf.int64))\n loop_vid_in_e_start = tf.sparse_add(loop_vid_in_e_start, tmp)\n next_vid_in_e_loop = tf.expand_dims(next_vid_in_e_loop, 1)\n next_vid_in_e_loop = tf.concat([tf.cast(need_loop_vidx,tf.int32), next_vid_in_e_loop], 1)\n next_vertex_idx_loop = tf.gather_nd(edgev_per_vertex, next_vid_in_e_loop)\n next_vidx_loop = tf.scatter_nd(next_vid_in_e_loop[:,0:1], next_vertex_idx_loop+1, [vertex_num])\n next_vidx += 
tf.expand_dims(next_vidx_loop,1)\n\n edgev_per_vertex = tf.concat([edgev_per_vertex, next_vidx], 1)\n\n # update remain_edges_pv\n if second_round:\n next_vertex_mask_2 = tf.scatter_nd(tf.expand_dims(open_vidx,-1), next_vertex_mask_2, tf.shape(next_vertex_mask))\n next_vertex_mask += next_vertex_mask_2\n remain_edges_pv = (remain_edges_pv+2) * tf.cast(1-next_vertex_mask, tf.int32) - 2\n\n e += 1\n return e, edgev_per_vertex, remain_edges_pv, loop_vid_in_e_start, valid_ev_num_pv, close_flag\n\n\n e = tf.constant(1)\n edgev_per_vertex = edges_per_vertex[:,0:2]\n remain_edges_pv = edges_per_vertex[:,2:]\n loop_vid_in_e_start = tf.ones([vertex_num], tf.int32)*0 # assume the path close, so loop start from the second one\n valid_ev_num_pv = tf.ones([vertex_num], tf.int32)*2\n close_flag = tf.zeros([vertex_num], tf.int32)\n cond = lambda e, edgev_per_vertex, remain_edges_pv, loop_vid_in_e_start, valid_ev_num_pv, close_flag: tf.less(e, edge_num)\n\n e, edgev_per_vertex, remain_edges_pv, loop_vid_in_e_start, valid_ev_num_pv, close_flag = tf.while_loop(cond, sort_one_edge, \\\n [e, edgev_per_vertex, remain_edges_pv, loop_vid_in_e_start, valid_ev_num_pv, close_flag])\n valid_ev_num_pv = tf.expand_dims(valid_ev_num_pv, 1)\n\n return edgev_per_vertex, valid_ev_num_pv, close_flag\n\n @staticmethod\n def sort_by_spectral(edges):\n import pudb; pudb.set_trace() # XXX BREAKPOINT\n pass\n\n\n @staticmethod\n def get_simplicity_label( fidx_per_vertex, fidx_pv_empty_mask,\n edges_per_vertex, edges_pv_empty_mask,\n vertex_nxnynz, face_label_category, face_label_instance):\n '''\n Inputs: [N, ?] 
[N,3] [F,1]\n A point is simple if:\n (1) all the faces belong to one point with same category and instance (and) material\n (2) all the faces belong to one point with similar normal\n '''\n num_vertex0 = tf.shape(vertex_nxnynz)[0]\n num_vertex0_f = tf.cast(num_vertex0, tf.float64)\n\n def get_same_category_mask():\n # get same_category_mask\n face_label_category_ = tf.squeeze(face_label_category, 1)\n vertex_label_categories = tf.gather(face_label_category_, fidx_per_vertex)\n\n same_category_mask = tf.equal(vertex_label_categories, vertex_label_categories[:,0:1])\n same_category_mask = tf.logical_or(same_category_mask, fidx_pv_empty_mask)\n same_category_mask = tf.reduce_all(same_category_mask, 1)\n return same_category_mask\n\n # get normal same mask\n def get_same_normal_mask(max_normal_dif):\n normal = tf.gather(vertex_nxnynz, edges_per_vertex)\n norm_dif_angle = tf.matmul(normal, tf.expand_dims(normal[:,0,:], -1))\n norm_dif_angle = tf.squeeze(norm_dif_angle, -1)\n max_norm_dif_angle = np.cos(MeshSampling._max_norm_dif_angle/180.0*np.pi)\n same_normal_mask = tf.greater(norm_dif_angle, max_norm_dif_angle)\n same_normal_mask = tf.logical_or(same_normal_mask, edges_pv_empty_mask)\n same_normal_mask = tf.reduce_all(same_normal_mask, 1)\n return same_normal_mask\n\n def extend_same_mask(same_mask0):\n same_mask0 = tf.greater(same_mask0, 0)\n extended_mask = tf.gather(same_mask0, edges_per_vertex)\n extended_mask = tf.logical_or(extended_mask, edges_pv_empty_mask)\n extended_mask = tf.reduce_all(extended_mask, 1)\n extended_mask = tf.cast(extended_mask, tf.int8)\n return extended_mask\n\n\n same_normal_mask = tf.cast(get_same_normal_mask(1e-2), tf.int8)\n same_category_mask = tf.cast(get_same_category_mask(), tf.int8)\n\n for d in range(MeshSampling._full_edge_dis-1):\n same_category_mask += extend_same_mask(same_category_mask)\n same_normal_mask += extend_same_mask(same_normal_mask)\n\n return same_normal_mask, same_category_mask\n\n @staticmethod\n def 
same_mask_nums(same_mask):\n same_nums = []\n for e in range(MeshSampling._full_edge_dis+1):\n same_num = tf.reduce_sum(tf.cast(tf.equal(same_mask, e), tf.int32))\n same_nums.append(same_num)\n return same_nums\n\n @staticmethod\n def same_mask_rates(same_mask, pre=''):\n num_total = tf.cast( TfUtil.tshape0( same_mask ), tf.float64)\n same_nums = MeshSampling.same_mask_nums(same_mask)\n same_rates = [1.0*n/num_total for n in same_nums]\n same_rates[0] = tf.Print(same_rates[0], same_rates, message=pre+' same rates: ')\n #print('\\n{} same rate:{}\\n'.format(pre, same_rates))\n return same_rates\n\n @staticmethod\n def get_face_same_mask(vertex_same_mask, vidx_per_face):\n same_mask = tf.gather(vertex_same_mask, vidx_per_face)\n face_same_mask = tf.reduce_max(same_mask, 1)\n return face_same_mask\n\n\n @staticmethod\n def rm_some_labels(raw_datas, dset_metas, splited_vidx):\n vertex_label = raw_datas['label_category'].shape[0] == raw_datas['xyz'].shape[0]\n if not vertex_label:\n return MeshSampling.rm_some_face_labels(raw_datas, dset_metas, splited_vidx)\n else:\n return MeshSampling.rm_some_vertex_labels(raw_datas, dset_metas, splited_vidx)\n\n @staticmethod\n def rm_some_vertex_labels(raw_datas, dset_metas, splited_vidx):\n unwanted_classes = ['void']\n unwanted_labels = tf.constant([[dset_metas.class2label[c] for c in unwanted_classes]], tf.int32)\n\n label_category = raw_datas['label_category']\n keep_vertex_mask = tf.not_equal(label_category, unwanted_labels)\n keep_vertex_mask = tf.reduce_all(keep_vertex_mask, 1)\n keep_vertex_idx = tf.squeeze(tf.cast(tf.where(keep_vertex_mask), tf.int32),1)\n\n num_vertex0 = TfUtil.tshape0(raw_datas['xyz'])\n if 'vidx_per_face' in raw_datas:\n keep_face_idx, vidx_per_face_new, _ = MeshSampling.down_sampling_face(\n keep_vertex_idx, num_vertex0, raw_datas['vidx_per_face'], True)\n else:\n keep_face_idx = vidx_per_face_new = None\n\n raw_datas = MeshSampling.gather_datas(raw_datas, keep_vertex_idx,\n keep_face_idx, 
vidx_per_face_new)\n\n # clean splited_vidx\n new_vidx_2_old_vidx = tf.scatter_nd(tf.expand_dims(keep_vertex_idx,-1), tf.range(tf.shape(keep_vertex_idx)[0])+1, [num_vertex0])-1\n if len(splited_vidx)>1:\n for i in range(len(splited_vidx)):\n new_vidx_i = tf.gather(new_vidx_2_old_vidx, splited_vidx[i])\n keep_idx_i = tf.cast(tf.where(tf.greater(new_vidx_i, -1)), tf.int32)\n splited_vidx[i] = tf.squeeze(tf.gather(new_vidx_i, keep_idx_i),1)\n return raw_datas, splited_vidx\n\n @staticmethod\n def rm_some_face_labels(raw_datas, dset_metas, splited_vidx):\n unwanted_classes = ['void']\n unwanted_labels = tf.constant([[dset_metas.class2label[c] for c in unwanted_classes]], tf.int32)\n\n label_category = raw_datas['label_category']\n keep_face_mask = tf.not_equal(label_category, unwanted_labels)\n keep_face_mask = tf.reduce_all(keep_face_mask, 1)\n keep_face_idx = tf.squeeze(tf.cast(tf.where(keep_face_mask), tf.int32),1)\n\n vidx_per_face = raw_datas['vidx_per_face']\n keep_vidx = tf.gather(vidx_per_face, keep_face_idx)\n keep_vidx = tf.reshape(keep_vidx, [-1])\n keep_vertex_idx ,_ = tf.unique(keep_vidx)\n\n num_vertex0 = TfUtil.tshape0(raw_datas['xyz'])\n keep_face_idx, vidx_per_face_new, _ = MeshSampling.down_sampling_face(\n keep_vertex_idx, num_vertex0, raw_datas['vidx_per_face'], True)\n\n raw_datas = MeshSampling.gather_datas(raw_datas, keep_vertex_idx,\n keep_face_idx, vidx_per_face_new)\n\n # clean splited_vidx\n new_vidx_2_old_vidx = tf.scatter_nd(tf.expand_dims(keep_vertex_idx,-1), tf.range(tf.shape(keep_vertex_idx)[0])+1, [num_vertex0])-1\n if len(splited_vidx)>1:\n for i in range(len(splited_vidx)):\n new_vidx_i = tf.gather(new_vidx_2_old_vidx, splited_vidx[i])\n keep_idx_i = tf.cast(tf.where(tf.greater(new_vidx_i, -1)), tf.int32)\n splited_vidx[i] = tf.squeeze(tf.gather(new_vidx_i, keep_idx_i),1)\n return raw_datas, splited_vidx\n\n\n @staticmethod\n def sampling_mesh( _num_vertex_sp, raw_datas, mesh_summary):\n num_vertex0 = 
TfUtil.tshape0(raw_datas['xyz'])\n if not isinstance(num_vertex0, tf.Tensor):\n sampling_rate = 1.0 * _num_vertex_sp / tf.cast(num_vertex0, tf.float32)\n print('\\nsampling org_num={}, fixed_num={}, valid_sp_rate={} (after rm void)\\n'.format(num_vertex0, _num_vertex_sp, sampling_rate))\n\n is_down_sampling = tf.less(_num_vertex_sp, num_vertex0)\n sampled_datas = tf.cond(is_down_sampling,\n lambda: MeshSampling.down_sampling_mesh(_num_vertex_sp, raw_datas.copy(), mesh_summary),\n lambda: MeshSampling.up_sampling_mesh(_num_vertex_sp, raw_datas.copy()))\n return sampled_datas\n\n @staticmethod\n def up_sampling_mesh( _num_vertex_sp, raw_datas):\n #MeshSampling.show_datas_shape(raw_datas)\n num_vertex0 = TfUtil.tshape0(raw_datas['xyz'])\n duplicate_num = _num_vertex_sp - num_vertex0\n with tf.control_dependencies([tf.assert_greater_equal(duplicate_num, 0, message=\"duplicate_num\")]):\n duplicate_num = tf.identity(duplicate_num)\n\n if 'same_category_mask' in raw_datas:\n raw_datas['same_category_mask'] = tf.cast(raw_datas['same_category_mask'], tf.int32)\n raw_datas['same_normal_mask'] = tf.cast(raw_datas['same_normal_mask'], tf.int32)\n for item in raw_datas:\n is_vertex = item in MeshSampling._vertex_eles\n if is_vertex:\n if TfUtil.tsize(raw_datas[item])>1:\n duplicated = tf.tile(raw_datas[item][-1:,:], [duplicate_num, 1])\n else:\n duplicated = tf.tile(raw_datas[item][-1:], [duplicate_num])\n raw_datas[item] = tf.concat([raw_datas[item], duplicated], 0)\n shape0 = TfUtil.get_tensor_shape(raw_datas[item])\n shape0[0] = _num_vertex_sp\n raw_datas[item].set_shape(shape0)\n if 'same_category_mask' in raw_datas:\n raw_datas['same_category_mask'] = tf.cast(raw_datas['same_category_mask'], tf.int8)\n raw_datas['same_normal_mask'] = tf.cast(raw_datas['same_normal_mask'], tf.int8)\n return raw_datas\n\n\n @staticmethod\n def down_sampling_mesh(_num_vertex_sp, raw_datas, mesh_summary):\n #return VertexDecimation.down_sampling_mesh(_num_vertex_sp, raw_datas, mesh_summary)\n 
return MeshSampling.down_sampling_mesh0(_num_vertex_sp, raw_datas, mesh_summary)\n\n @staticmethod\n def down_sampling_mesh0(_num_vertex_sp, raw_datas, mesh_summary, down_sp_method='random'):\n num_vertex0 = TfUtil.tshape0(raw_datas['xyz'])\n if down_sp_method == 'prefer_simple':\n vertex_spidx = MeshSampling.down_sampling_vertex_presimple(\n raw_datas['same_normal_mask'], _num_vertex_sp)\n elif down_sp_method == 'random':\n vertex_spidx = MeshSampling.down_sampling_vertex_random(\n num_vertex0, _num_vertex_sp)\n\n if 'vidx_per_face' in raw_datas and raw_datas['vidx_per_face'] is not None:\n if 'edgev_per_vertex' in raw_datas:\n edgev_per_vertex = raw_datas['edgev_per_vertex']\n valid_ev_num_pv = raw_datas['valid_ev_num_pv']\n else:\n edgev_per_vertex = None\n valid_ev_num_pv = None\n\n face_sp_indices, vidx_per_face_new, edgev_per_vertex_new, valid_ev_num_pv_new =\\\n MeshSampling.update_face_edgev(\n vertex_spidx, num_vertex0, raw_datas['vidx_per_face'],\n edgev_per_vertex, valid_ev_num_pv, xyz=raw_datas['xyz'],\n mesh_summary=mesh_summary)\n else:\n face_sp_indices = vidx_per_face_new = edgev_per_vertex_new = valid_ev_num_pv_new = None\n raw_datas = MeshSampling.gather_datas(raw_datas, vertex_spidx,\n face_sp_indices, vidx_per_face_new,\n edgev_per_vertex_new, valid_ev_num_pv_new)\n return raw_datas\n\n @staticmethod\n def gather_datas(datas, vertex_spidx, face_sp_indices=None, vidx_per_face_new=None,\n edgev_per_vertex_new=None, valid_ev_num_pv_new=None):\n num_vertex0 = TfUtil.tshape0(datas['xyz'])\n new_datas = {}\n\n for item in datas:\n if item in ['vidx_per_face', 'edgev_per_vertex', 'valid_ev_num_pv']:\n # not only order changed, update seperatly\n continue\n is_vertex_0 = tf.equal(tf.shape(datas[item])[0], num_vertex0)\n is_vertex = item in MeshSampling._vertex_eles\n check0 = tf.assert_equal(is_vertex, is_vertex_0)\n with tf.control_dependencies([check0]):\n is_vertex = tf.identity(is_vertex)\n\n if face_sp_indices is None:\n with 
tf.control_dependencies([tf.assert_equal(is_vertex, True)]):\n sp_indices = vertex_spidx\n else:\n sp_indices = tf.cond(is_vertex,\n lambda: vertex_spidx,\n lambda: face_sp_indices )\n new_datas[item] = tf.gather(datas[item], sp_indices)\n\n if vidx_per_face_new is not None:\n new_datas['vidx_per_face'] = vidx_per_face_new\n if 'edgev_per_vertex' in datas:\n new_datas['edgev_per_vertex'] = edgev_per_vertex_new\n new_datas['valid_ev_num_pv'] = valid_ev_num_pv_new\n return new_datas\n\n @staticmethod\n def up_sampling_vertex(same_normal_mask, _num_vertex_sp):\n num_vertex0 = TfUtil.tshape0( same_normal_mask )\n #simple_indices = tf.squeeze(tf.where(tf.greater_equal(\n # same_normal_mask, MeshSampling._full_edge_dis)),1)\n duplicate_num = _num_vertex_sp - num_vertex0\n #duplicate_indices = tf.tile( simple_indices[0:1], [duplicate_num] )\n duplicate_indices = tf.ones([duplicate_num], tf.int32) * (num_vertex0 -1)\n vertex_spidx = tf.concat([tf.range(num_vertex0), duplicate_indices], 0)\n return vertex_spidx\n\n @staticmethod\n def down_sampling_vertex_random(num_vertex0, _num_vertex_sp):\n vertex_spidx = tf.random_shuffle(tf.range(num_vertex0))[0:_num_vertex_sp]\n vertex_spidx = tf.contrib.framework.sort(vertex_spidx)\n return vertex_spidx\n\n\n @staticmethod\n def down_sampling_vertex_presimple(same_normal_mask, _num_vertex_sp):\n same_normal_mask = tf.squeeze(same_normal_mask)\n num_vertex0 = tf.shape(same_normal_mask)[0]\n sampling_rate = 1.0 * tf.cast(_num_vertex_sp, tf.float32) / tf.cast(num_vertex0, tf.float32)\n #print('org num:{}, sampled num:{}, sampling_rate:{}'.format(\n # num_vertex0, _num_vertex_sp, sampling_rate))\n del_num = num_vertex0 - _num_vertex_sp\n same_nums = MeshSampling.same_mask_nums(same_normal_mask)\n full_dis = MeshSampling._full_edge_dis\n\n #*********************\n # max_dis: the max dis that provide enough simple vertices to remove\n assert len(same_nums) == full_dis + 1\n\n j = tf.constant(0, tf.int32)\n max_dis = tf.constant(full_dis, 
tf.int32)\n simple_is_enough_to_rm = tf.constant(False, tf.bool)\n\n def cond(j, simple_is_enough_to_rm, max_dis):\n cond0 = tf.less(j, full_dis)\n cond1 = tf.logical_not(simple_is_enough_to_rm)\n cond = tf.logical_and(cond0, cond1)\n return cond\n def body(j, simple_is_enough_to_rm, max_dis):\n max_dis = full_dis -j\n simple_num = tf.reduce_sum(tf.gather(same_nums, tf.range(full_dis-j,full_dis+1)))\n simple_is_enough_to_rm = tf.greater(simple_num, del_num)\n j += 1\n return j, simple_is_enough_to_rm, max_dis\n j, simple_is_enough_to_rm, max_dis = tf.while_loop(cond, body, [j, simple_is_enough_to_rm, max_dis])\n\n max_dis = tf.cast(max_dis, tf.int8)\n\n #*********************\n complex_indices = tf.squeeze(tf.where(tf.less(same_normal_mask, max_dis)),1)\n complex_indices = tf.cast(complex_indices, tf.int32)\n complex_num = tf.shape(complex_indices)[0]\n simple_indices = tf.squeeze(tf.where(tf.greater_equal(same_normal_mask, max_dis)),1)\n simple_indices = tf.cast(simple_indices, tf.int32)\n simple_num = tf.shape(simple_indices)[0]\n # max_dis>0: simple vertices are enough to del. 
Keep all complex, rm from\n # simple: complex_indices + part of simple_indices\n # max_dis==0: rm all simple, and part of complex\n\n def rm_part_of_simple_only():\n sp_num_from_simple = _num_vertex_sp - complex_num\n tmp = tf.random_shuffle(tf.range(simple_num))[0:sp_num_from_simple]\n simple_sp_indices = tf.gather(simple_indices, tmp)\n vertex_spidx = tf.concat([complex_indices, simple_sp_indices], 0)\n return vertex_spidx\n\n def rm_all_simple_partof_complex():\n sp_num_from_complex = _num_vertex_sp\n tmp = tf.random_shuffle(tf.range(complex_num))[0:sp_num_from_complex]\n vertex_spidx = tf.gather(complex_indices, tmp)\n return vertex_spidx\n\n vertex_spidx = tf.cond(simple_is_enough_to_rm,\n rm_part_of_simple_only,\n rm_all_simple_partof_complex )\n\n vertex_spidx = tf.contrib.framework.sort(vertex_spidx)\n\n check_s = tf.assert_equal(tf.shape(vertex_spidx)[0], _num_vertex_sp)\n with tf.control_dependencies([check_s]):\n vertex_spidx = tf.identity(vertex_spidx)\n vertex_spidx.set_shape([_num_vertex_sp])\n\n if MeshSampling._check_optial:\n # check no duplicate\n tmp0, tmp1, tmp_count = tf.unique_with_counts(vertex_spidx)\n max_count = tf.reduce_max(tmp_count)\n check_no_duplicate = tf.assert_equal(max_count,1)\n with tf.control_dependencies([check_no_duplicate]):\n vertex_spidx = tf.identity(vertex_spidx)\n\n return vertex_spidx\n\n @staticmethod\n def move_neg_left(edgev_per_vertex0):\n invalid_mask = tf.less(edgev_per_vertex0, 0)\n invalid_idx = tf.cast(tf.where(invalid_mask), tf.int32)\n left_idx0 = invalid_idx - tf.constant([[0,1]])\n vn, en = get_tensor_shape(edgev_per_vertex0)\n min_limit = tf.cast( tf.less(left_idx0[:,1:2], 0), tf.int32 ) * en\n tmp = tf.zeros([get_tensor_shape(left_idx0)[0],1], tf.int32)\n min_limit = tf.concat([tmp, min_limit], 1)\n left_idx0 += min_limit\n\n left_idx = tf.gather_nd(edgev_per_vertex0, left_idx0) + 1\n move_left = tf.scatter_nd(invalid_idx, left_idx, tf.shape(edgev_per_vertex0))\n edgev_per_vertex1 = edgev_per_vertex0 + 
move_left\n invalid_num = tf.reduce_sum(tf.cast(tf.less(edgev_per_vertex1,0), tf.int32))\n return edgev_per_vertex1, invalid_num\n\n @staticmethod\n def move_neg_right(edgev_per_vertex0):\n invalid_mask = tf.less(edgev_per_vertex0, 0)\n invalid_idx = tf.cast(tf.where(invalid_mask), tf.int32)\n vn, en = get_tensor_shape(edgev_per_vertex0)\n right_idx0 = invalid_idx + tf.constant([[0,1]])\n max_limit = tf.cast( tf.less(right_idx0[:,1:2], en), tf.int32 )\n tmp = tf.ones([get_tensor_shape(right_idx0)[0],1], tf.int32)\n max_limit = tf.concat([tmp, max_limit], 1)\n right_idx0 *= max_limit\n\n right_idx = tf.gather_nd(edgev_per_vertex0, right_idx0) + 1\n move_right = tf.scatter_nd(invalid_idx, right_idx, tf.shape(edgev_per_vertex0))\n edgev_per_vertex1 = edgev_per_vertex0 + move_right\n invalid_num = tf.reduce_sum(tf.cast(tf.less(edgev_per_vertex1,0), tf.int32))\n return edgev_per_vertex1, invalid_num\n\n @staticmethod\n def replace_neg(edgev_per_vertex0, invalid_num, round_id):\n edgev_per_vertex1, invalid_num = MeshSampling.move_neg_left (edgev_per_vertex0)\n def no_op():\n return edgev_per_vertex1, invalid_num\n def move_right():\n return MeshSampling.move_neg_right(edgev_per_vertex1)\n edgev_per_vertex1, invalid_num = tf.cond(tf.greater(invalid_num,0), move_right, no_op)\n return edgev_per_vertex1, invalid_num, round_id+1\n\n @staticmethod\n def replace_neg_by_lr(edgev_per_vertex_new1, max_round=2):\n invalid_num = tf.reduce_sum(tf.cast(tf.less(edgev_per_vertex_new1,0), tf.int32))\n round_id = tf.constant(0)\n cond = lambda edgev_per_vertex_new1, invalid_num, round_id: tf.logical_and(tf.greater(invalid_num, 0), tf.less(round_id, max_round))\n edgev_per_vertex_new2, invalid_num2, round_id2 = tf.while_loop(cond,\n MeshSampling.replace_neg,\n [edgev_per_vertex_new1, invalid_num, round_id])\n\n return edgev_per_vertex_new2\n\n @staticmethod\n def replace_neg_by_self(edgev_per_vertex_new2):\n lonely_mask = tf.less(edgev_per_vertex_new2, 0)\n any_lonely = 
tf.reduce_any(lonely_mask)\n def do_replace_by_self():\n lonely_vidx = tf.cast(tf.where(lonely_mask), tf.int32)\n lonely_num = tf.cast(tf.shape(lonely_vidx)[0], tf.float32)\n vn = tf.cast(get_tensor_shape(edgev_per_vertex_new2)[0], tf.float32)\n lonely_rate = lonely_num / vn\n tmp = tf.scatter_nd(lonely_vidx, lonely_vidx[:,0]+1, tf.shape(edgev_per_vertex_new2))\n return edgev_per_vertex_new2 + tmp\n def no_op():\n return edgev_per_vertex_new2\n edgev_per_vertex_new3 = tf.cond(any_lonely, do_replace_by_self, no_op)\n return edgev_per_vertex_new3\n\n\n @staticmethod\n def get_twounit_edgev(edgev_per_vertex0, xyz0, raw_vidx_2_sp_vidx, vertex_spidx,\n max_fail_2unit_ev_rate, scale=None):\n # the edgev of edgev: geodesic distance = 2 unit\n edgev_edgev_idx0 = tf.gather(edgev_per_vertex0, edgev_per_vertex0)\n edgev_edgev = tf.gather(edgev_edgev_idx0, vertex_spidx)\n\n edgev_per_vertex = tf.gather(edgev_per_vertex0, vertex_spidx)\n edgev_xyz = tf.gather(xyz0, edgev_per_vertex)\n edgev_edgev_xyz = tf.gather(xyz0, edgev_edgev)\n xyz = tf.expand_dims(tf.gather(xyz0, vertex_spidx), 1)\n # get the vector from base vertex to edgev\n V0 = edgev_xyz - xyz\n V0 = tf.expand_dims(V0, 2)\n xyz = tf.expand_dims(xyz, 1)\n # get the vector from base vertex to edgev_edgev\n V1 = edgev_edgev_xyz - xyz\n\n # (1) At tht other side of edgev: project_l > 1\n V0_normal = tf.norm(V0, axis=-1, keepdims=True)\n V0 = V0 / V0_normal\n project_l = tf.reduce_sum(V0 * V1,-1) / tf.squeeze(V0_normal,3)\n other_side_mask = tf.cast(tf.greater(project_l, 1), tf.float32)\n # (2) valid mask\n edgev_edgev_sampled = tf.gather(raw_vidx_2_sp_vidx, edgev_edgev)\n valid_mask = tf.cast(tf.greater(edgev_edgev_sampled, 0), tf.float32)\n\n # (3) Distance to the projection line: want the colest one\n vn, evn = get_tensor_shape(edgev_per_vertex)\n tmp = tf.cross(tf.tile(V0, [1,1,evn,1]), V1)\n dis_to_proj_line = tf.abs(tf.reduce_sum(tmp, -1))\n\n # make all the vertices not at the other side very big\n dis_to_proj_line 
+= (1 - other_side_mask)*10\n dis_to_proj_line += (1 - valid_mask)*100\n\n min_idx = tf.argmin(dis_to_proj_line, axis=-1, output_type=tf.int32)\n min_idx = tf.expand_dims(min_idx, -1)\n # gather twounit_edgev satisfy three requiements\n v_idx = tf.tile(tf.reshape(tf.range(vn), [-1,1,1]), [1,evn,1])\n ev_idx = tf.tile(tf.reshape(tf.range(evn),[1,-1,1]), [vn,1,1])\n closest_idx = tf.concat([v_idx, ev_idx, min_idx], -1)\n twounit_edgev = tf.gather_nd(edgev_edgev, closest_idx)\n\n # (4) transfer to sampled idx\n twounit_edgev_new = tf.gather(raw_vidx_2_sp_vidx, twounit_edgev)\n\n # (5) Failed to find 2 unit edgev: mainly because all the edgev of a vertex are\n # removed. The rate should be small. Just remove the edgev.\n fail_2unit_ev_mask = tf.less(twounit_edgev_new, 0)\n any_2unit_failed = tf.reduce_any(fail_2unit_ev_mask)\n\n if max_fail_2unit_ev_rate is None:\n max_fail_2unit_ev_rate = 3e-3 # should be 1e-4\n def rm_invalid_2uedgev():\n # all the edgev for a vertex are lost\n fail_2unit_e_mask = tf.reduce_all(fail_2unit_ev_mask, 1)\n fail_2uedge_e_num = tf.reduce_sum(tf.cast(fail_2unit_e_mask, tf.float32))\n fail_2unit_e_rate = fail_2uedge_e_num / tf.cast(vn, tf.float32)\n check_e_fail = tf.assert_less(fail_2unit_e_rate, 1e-4,\n message=\"fail_2unit_e_rate: all two unit edgev of a vertex are lost, scale {}\".format(scale))\n\n # the detailed edgev lost for each vertex\n fail_2uedge_ev_num = tf.reduce_sum(tf.cast(fail_2unit_ev_mask, tf.float32))\n # normally 5e-5 for downsample in data preprocess\n fail_2unit_ev_rate = fail_2uedge_ev_num / tf.cast(vn, tf.float32) / tf.cast(evn, tf.float32)\n #fail_2unit_ev_rate = tf.Print(fail_2unit_ev_rate, [fail_2unit_ev_rate],\n # message=\"\\n\\t\\tfail_2unit_ev_rate scale {}: {} > \".format(scale, max_fail_2unit_ev_rate))\n # (a) All vertices for a path are deleted\n # (b) Spliting lead to some lost near the boundary\n check_ev_fail = tf.assert_less(fail_2unit_ev_rate, max_fail_2unit_ev_rate, message=\"fail_2unit_ev_rate scale 
{}\".format(scale))\n with tf.control_dependencies([check_e_fail, check_ev_fail]):\n twounit_edgev_new_2 = MeshSampling.replace_neg_by_lr(twounit_edgev_new)\n return MeshSampling.replace_neg_by_self(twounit_edgev_new_2), fail_2unit_ev_rate\n def no_op():\n return twounit_edgev_new, tf.constant(0, tf.float32)\n twounit_edgev_new, fail_2uedge_rate = tf.cond(any_2unit_failed, rm_invalid_2uedgev, no_op)\n\n # (6)final check\n min_idx = tf.reduce_min(twounit_edgev_new)\n #if min_idx.numpy() < 0:\n # # check min dis\n # invalid_mask = tf.less(twounit_edgev_new,0)\n # invalid_idx = tf.where(invalid_mask)\n # import pdb; pdb.set_trace() # XXX BREAKPOINT\n # pass\n check = tf.assert_greater(min_idx, -1, message=\"twounit_edgev_new\")\n with tf.control_dependencies([check]):\n twounit_edgev_new = tf.identity(twounit_edgev_new)\n return twounit_edgev_new, fail_2uedge_rate\n\n @staticmethod\n def replace_neg_by_2unit_edgev(edgev_per_vertex_new1, twounit_edgev):\n neg_mask = tf.cast(tf.less(edgev_per_vertex_new1, 0), tf.int32)\n edgev_per_vertex_new = edgev_per_vertex_new1 * (1-neg_mask) + twounit_edgev * (neg_mask)\n return edgev_per_vertex_new\n\n @staticmethod\n def update_valid_ev_num_pv(edgev_per_vertex_new1, valid_ev_num_pv, vertex_spidx):\n rmed_edgev_mask0 = tf.less(edgev_per_vertex_new1, 0)\n eshape = get_tensor_shape(edgev_per_vertex_new1)\n tmp = tf.tile(tf.reshape(tf.range(eshape[1]), [1,-1]), [eshape[0],1])\n valid_ev_num_pv_new = tf.gather(valid_ev_num_pv, tf.squeeze(vertex_spidx,1))\n valid_mask = tf.less(tmp, valid_ev_num_pv_new)\n rmed_edgev_mask = tf.logical_and(rmed_edgev_mask0, valid_mask)\n rmed_edgev_num = tf.reduce_sum(tf.cast(rmed_edgev_mask, tf.int32), 1)\n valid_ev_num_pv_new = valid_ev_num_pv_new - tf.expand_dims(rmed_edgev_num,1)\n return valid_ev_num_pv_new\n\n @staticmethod\n def rm_lost_face(vidx_per_face, raw_vidx_2_sp_vidx, rm_cond):\n '''\n rm_cond = 'any': remove the face is any vertex of the 3 is deleted\n Used when rm_some_labels\n rm_cond = 
'any': remove the face only when all the 3 vertices are deleted\n Used when coarserning vertex\n '''\n assert rm_cond=='any' or rm_cond=='all'\n vidx_per_face_new = tf.gather(raw_vidx_2_sp_vidx, vidx_per_face)\n if rm_cond=='all':\n vidx_per_face_new = MeshSampling.replace_neg_by_lr(vidx_per_face_new, max_round=1)\n remain_mask = tf.reduce_all(tf.greater(vidx_per_face_new, -1),1)\n face_sp_indices = tf.squeeze(tf.where(remain_mask), 1)\n face_sp_indices = tf.cast(face_sp_indices, tf.int32)\n\n vidx_per_face_new = tf.gather(vidx_per_face_new, face_sp_indices)\n return face_sp_indices, vidx_per_face_new\n\n @staticmethod\n def edgev_to_face(edgev, valid_ev_num_pv):\n eshape = get_tensor_shape(edgev)\n v0 = tf.expand_dims(edgev[:,0:-1], 2)\n v1 = tf.expand_dims(edgev[:,1:], 2)\n v2 = tf.tile(tf.reshape(tf.range(eshape[0]), [-1,1,1]), [1,eshape[1]-1,1])\n face = tf.concat([v2, v0, v1], -1)\n\n tmp = tf.tile( tf.reshape(tf.range(eshape[1]), [1, -1]), [eshape[0],1])\n valid_mask = tf.less(tmp, valid_ev_num_pv-1)\n valid_idx = tf.where(valid_mask)\n vidx_per_face_new = tf.gather_nd(face, valid_idx)\n return vidx_per_face_new\n\n @staticmethod\n def update_face_edgev(vertex_spidx, num_vertex0, vidx_per_face, edgev_per_vertex,\n valid_ev_num_pv, xyz, mesh_summary):\n face_sp_indices, vidx_per_face_new, raw_vidx_2_sp_vidx = MeshSampling.down_sampling_face(\\\n vertex_spidx, num_vertex0, vidx_per_face, False)\n if edgev_per_vertex is not None:\n edgev_per_vertex_new3, valid_ev_num_pv_new, raw_edgev_spvidx = MeshSampling.rich_edges(vertex_spidx,\\\n edgev_per_vertex, xyz, raw_vidx_2_sp_vidx, valid_ev_num_pv, mesh_summary)\n else:\n edgev_per_vertex_new3 = valid_ev_num_pv_new = None\n return face_sp_indices, vidx_per_face_new, edgev_per_vertex_new3, valid_ev_num_pv_new\n\n @staticmethod\n def get_raw_vidx_2_sp_vidx(vertex_spidx, num_vertex0):\n assert TfUtil.tsize(vertex_spidx) == 1\n _num_vertex_sp = TfUtil.get_tensor_shape(vertex_spidx)[0]\n vertex_spidx = 
tf.expand_dims(tf.cast(vertex_spidx, tf.int32),1)\n # scatter new vertex index\n raw_vidx_2_sp_vidx = tf.scatter_nd(vertex_spidx, tf.range(_num_vertex_sp)+1, [num_vertex0])-1\n return raw_vidx_2_sp_vidx\n\n @staticmethod\n def rich_edges(vertex_spidx, edgev_per_vertex, xyz, raw_vidx_2_sp_vidx,\n valid_ev_num_pv, mesh_summary={}, max_fail_2unit_ev_rate=None, scale=None):\n assert len(get_tensor_shape(vertex_spidx)) == 1\n assert len(get_tensor_shape(edgev_per_vertex)) == len(get_tensor_shape(xyz)) == 2\n #rich_edge_method = 'remove'\n rich_edge_method = 'twounit_edgev'\n\n raw_edgev_spvidx = tf.gather(raw_vidx_2_sp_vidx, edgev_per_vertex)\n edgev_per_vertex_new1 = tf.gather(raw_edgev_spvidx, vertex_spidx)\n\n #raw_edgev_spvidx = tf.gather(edgev_per_vertex, vertex_spidx)\n #edgev_per_vertex_new1 = tf.gather(raw_vidx_2_sp_vidx, raw_edgev_spvidx)\n\n if rich_edge_method == 'twounit_edgev':\n twounit_edgev, mesh_summary['fail_2uedge_rate'] = MeshSampling.get_twounit_edgev(\n edgev_per_vertex, xyz, raw_vidx_2_sp_vidx, vertex_spidx, max_fail_2unit_ev_rate, scale)\n edgev_per_vertex_new2 = MeshSampling.replace_neg_by_2unit_edgev(edgev_per_vertex_new1, twounit_edgev)\n if valid_ev_num_pv is None:\n valid_ev_num_pv_new = None\n else:\n valid_ev_num_pv_new = tf.gather( valid_ev_num_pv, vertex_spidx )\n\n elif rich_edge_method == 'remove':\n if valid_ev_num_pv is None:\n valid_ev_num_pv_new = None\n else:\n valid_ev_num_pv_new = MeshSampling.update_valid_ev_num_pv(edgev_per_vertex_new1, valid_ev_num_pv, vertex_spidx)\n edgev_per_vertex_new2 = MeshSampling.replace_neg_by_lr(edgev_per_vertex_new1)\n\n # there may still be some negative, but very few. Just handle as lonely\n # points. 
Assign self vertex idx to the lonely edges\n edgev_per_vertex_new3 = MeshSampling.replace_neg_by_self(edgev_per_vertex_new2)\n\n return edgev_per_vertex_new3, valid_ev_num_pv_new, raw_edgev_spvidx\n\n @staticmethod\n def get_raw2sp(edgev_per_vertex, raw_vidx_2_sp_vidx, valid_ev_num_pv, raw_edgev_spvidx,\n max_bp_fail_rate=5e-4, scale=None):\n ''' [vn] [vn,12]\n (1) If a vertex is not removed, use itself in raw_vidx_2_sp_vidx\n (2) If a vertex is removed, use any edgev\n (3) If edgev not found, use 2 unit edgev\n '''\n assert len(get_tensor_shape(raw_vidx_2_sp_vidx)) == 1\n assert len(get_tensor_shape(valid_ev_num_pv)) == len(get_tensor_shape(raw_edgev_spvidx)) == 2\n\n eshape = get_tensor_shape(raw_edgev_spvidx)\n tmp = tf.tile(tf.reshape(tf.range(eshape[1]), [1,-1]), [eshape[0],1])\n valid_mask = tf.cast(tf.less(tmp, valid_ev_num_pv), tf.int32)\n edgev_bridge1 = (raw_edgev_spvidx+1) * valid_mask -1\n edgev_backp, _ = tf.nn.top_k(edgev_bridge1, 1, sorted=False)\n edgev_backp = tf.squeeze(edgev_backp, 1)\n\n lost_raw_mask = tf.cast(tf.less(raw_vidx_2_sp_vidx, 0), tf.int32)\n backprop_vidx_0 = raw_vidx_2_sp_vidx + lost_raw_mask * (1+edgev_backp)\n\n # (3) Some lost vertex cannot find edgev. 
Use 2 unit edgev\n bp_fail_mask0 = tf.less(backprop_vidx_0, 0)\n bp_fail_num0 = tf.reduce_sum(tf.cast(bp_fail_mask0, tf.float32))\n bp_fail_rate0 = bp_fail_num0 / tf.cast(eshape[0], tf.float32)\n\n bp_fail_idx = tf.squeeze(tf.where(bp_fail_mask0),1)\n fail_edgev = tf.gather(edgev_per_vertex, bp_fail_idx)\n fail_2u_edgev = tf.gather(edgev_per_vertex, fail_edgev)\n fail_2u_edgev_sp = tf.gather(raw_vidx_2_sp_vidx, fail_2u_edgev)\n fail_2u_edgev_sp = tf.reshape(fail_2u_edgev_sp, [-1, eshape[1]*eshape[1]])\n edgev2u_backp, _ = tf.nn.top_k(fail_2u_edgev_sp, 1)\n edgev2u_backp = tf.squeeze(edgev2u_backp, 1)\n backprop_vidx_2 = tf.scatter_nd(tf.expand_dims(bp_fail_idx,1), edgev2u_backp+1, [eshape[0]])\n\n backprop_vidx = backprop_vidx_0 + backprop_vidx_2\n\n bp_fail_mask = tf.less(backprop_vidx, 0)\n bp_fail_num = tf.reduce_sum(tf.cast(bp_fail_mask, tf.float32))\n bp_fail_rate = bp_fail_num / tf.cast(eshape[0], tf.float32)\n #backprop_vidx = tf.Print(backprop_vidx, [bp_fail_rate],\n # message=\"bp_fail_rate scale {}: {} > \".format(scale, max_bp_fail_rate))\n check_bp_fail = tf.assert_less(bp_fail_rate, max_bp_fail_rate,\n message=\"too many back prop fail num at scale {}\".format(scale))\n with tf.control_dependencies([bp_fail_rate]):\n backprop_vidx = tf.identity(backprop_vidx)\n return backprop_vidx, bp_fail_mask\n\n @staticmethod\n def down_sampling_face(vertex_spidx, num_vertex0, vidx_per_face, is_rm_some_label):\n assert TfUtil.tsize(vertex_spidx) == 1\n raw_vidx_2_sp_vidx = MeshSampling.get_raw_vidx_2_sp_vidx(vertex_spidx, num_vertex0)\n rm_cond = 'any' if is_rm_some_label else 'all'\n face_sp_indices, vidx_per_face_new = MeshSampling.rm_lost_face(vidx_per_face, raw_vidx_2_sp_vidx, rm_cond=rm_cond)\n return face_sp_indices, vidx_per_face_new, raw_vidx_2_sp_vidx\n\n\n @staticmethod\n def gen_ply_raw(raw_datas, same_normal_mask, same_category_mask, same_norm_cat_mask, ply_dir):\n if ply_dir == None:\n ply_dir = '/tmp'\n # face same mask for generating ply\n 
face_same_normal_mask = MeshSampling.get_face_same_mask(same_normal_mask,\n raw_datas['vidx_per_face'])\n face_same_category_mask = MeshSampling.get_face_same_mask(same_category_mask,\n raw_datas['vidx_per_face'])\n face_same_norm_cat_mask = MeshSampling.get_face_same_mask(same_norm_cat_mask,\n raw_datas['vidx_per_face'])\n\n # same rates\n same_norm_rates = MeshSampling.same_mask_rates(same_normal_mask, 'v_normal')\n same_category_rates = MeshSampling.same_mask_rates(same_category_mask, 'v_category')\n same_norm_cat_rates = MeshSampling.same_mask_rates(same_norm_cat_mask, 'v_norm_cat')\n\n face_norm_rates = MeshSampling.same_mask_rates(face_same_normal_mask, 'f_normal')\n face_category_rates = MeshSampling.same_mask_rates(face_same_category_mask, 'f_category')\n face_norm_cat_rates = MeshSampling.same_mask_rates(face_same_norm_cat_mask, 'f_norm_cat')\n\n same_normal_mask = same_normal_mask.numpy()\n same_category_mask = same_category_mask.numpy()\n face_same_normal_mask = tf.Print(face_same_normal_mask,\n [MeshSampling._max_norm_dif_angle],\n message='max_normal_dif_angle')\n\n\n ply_util.gen_mesh_ply('{}/face_same_normal_{}degree.ply'.format(ply_dir,\\\n int(MeshSampling._max_norm_dif_angle)),\n raw_datas['xyz'],\n raw_datas['vidx_per_face'],\n face_label = face_same_normal_mask)\n\n ply_util.gen_mesh_ply('{}/face_same_category.ply'.format(ply_dir), raw_datas['xyz'],\n raw_datas['vidx_per_face'],\n face_label = face_same_category_mask)\n ply_util.gen_mesh_ply('{}/face_same_norm_{}degree_cat.ply'.format(ply_dir, \\\n int(MeshSampling._max_norm_dif_angle)),\n raw_datas['xyz'],\n raw_datas['vidx_per_face'],\n face_label = face_same_norm_cat_mask)\n\n\n def show_simplity_label(raw_datas, same_normal_mask, same_category_mask):\n '''\n numpy inputs\n '''\n\n\n\nclass EdgeVPath():\n @staticmethod\n def sort_edgev_by_angle(edge_aug_vidx, xyz, norm, xyz_raw=None, cycle_idx=False, max_evnum_next=None, edge_vidx_base=None, geodis=None):\n '''\n edge_aug_vidx: [batch_size, 
vertex_num, k1, k2]\n xyz: [batch_size, vertex_num, 3]\n cycle_idx=True to enable fill up empty idx by cycle valid idx\n clean_innder: clean the edgev not with largest geodesic distance.\n Has to be True when expdand edgev.\n max_evnum_next: fix edgev size and speed up\n '''\n\n with tf.variable_scope('sort_edgev_by_angle'):\n with_batch_dim = True\n if len(get_tensor_shape(edge_aug_vidx)) == 3:\n with_batch_dim = False\n # not include batch size dim\n edge_aug_vidx = tf.expand_dims(edge_aug_vidx, 0)\n xyz = tf.expand_dims(xyz, 0)\n norm = tf.expand_dims(norm, 0)\n if xyz_raw is not None:\n xyz_raw = tf.expand(xyz_raw, 0)\n if edge_vidx_base is not None:\n edge_vidx_base = tf.expand(edge_vidx_base, 0)\n\n eshape0 = get_tensor_shape(edge_aug_vidx)\n assert len(eshape0) == 4\n batch_size, vn, evn1, evn2 = eshape0\n\n edge_vidx_next, valid_ev_num_next = EdgeVPath.clean_duplicate_edgev(edge_aug_vidx)\n if edge_vidx_base is not None:\n assert tsize(edge_vidx_base) == 3\n # only used for expand path\n edge_vidx_next, valid_ev_num_next = EdgeVPath.clean_edgebase_in_next(edge_vidx_next, edge_vidx_base, valid_ev_num_next)\n\n if max_evnum_next is not None:\n # Fix the edge vertex num here has advantage to save some time for later\n # process. But keep in mind, if some long path are cut. It's not a good\n # path any more. Because the edgev are not sorted yet.\n edge_vidx_next = edge_vidx_next[:,:,0:max_evnum_next]\n\n org_max_valid_evn = get_tensor_shape(edge_vidx_next)[-1]\n cos_angle = EdgeVPath.get_geodesic_angle(edge_vidx_next, xyz, norm, valid_ev_num_next, xyz_raw, geodis=geodis)\n\n # cycle idx\n if cycle_idx:\n cycle_num = org_max_valid_evn\n # cos_angle: [-3,1]. -5 to make cycled all smaller. 
Empty is smaller than -20\n cos_angle1 = cos_angle[:,:,0:cycle_num] - 5\n cos_angle = tf.concat([cos_angle, cos_angle1], -1)\n edge_vidx_next = tf.concat([edge_vidx_next, edge_vidx_next[:,:,0:cycle_num]],-1)\n sort_idx = tf.contrib.framework.argsort(cos_angle, axis=-1, direction='DESCENDING')\n if cycle_idx:\n sort_idx = sort_idx[:,:,0:org_max_valid_evn]\n\n edge_vidx_next_sorted = TfUtil.gather_third_d(edge_vidx_next, sort_idx)\n\n # if still -1, replace with the first\n edge_vidx_next_sorted = EdgeVPath.replace_neg_by_first_or_last(edge_vidx_next_sorted, 'last_valid', valid_ev_num_next)\n\n # fix shape\n if max_evnum_next is not None:\n evn_ap = max_evnum_next - get_tensor_shape(edge_vidx_next_sorted)[-1]\n edge_vidx_next_sorted = tf.concat([edge_vidx_next_sorted, tf.tile(edge_vidx_next_sorted[:,:,0:1], [1,1,evn_ap])], -1)\n\n if not with_batch_dim:\n edge_vidx_next_sorted = tf.squeeze(edge_vidx_next_sorted, 0)\n valid_ev_num_next = tf.squeeze(valid_ev_num_next, 0)\n valid_ev_num_next = tf.expand_dims(valid_ev_num_next, -1)\n\n # check\n check = tf.assert_greater(tf.reduce_min(edge_vidx_next_sorted),-1)\n with tf.control_dependencies([check]):\n edge_vidx_next_sorted = tf.identity(edge_vidx_next_sorted)\n\n\n return edge_vidx_next_sorted, valid_ev_num_next\n\n @staticmethod\n def set_inner_idx_neg(edge_vidx_base_aug, edge_vidx_base, xyz, xyz_raw=None):\n '''\n Assume all the outside vertices have angle > 90\n '''\n assert tsize(edge_vidx_base) == 3\n assert tsize(edge_vidx_base_aug) == 4\n with tf.variable_scope('set_inner_idx_neg'):\n if xyz_raw is None:\n xyz_raw = xyz\n edge2u_v = TfUtil.gather_second_d(xyz_raw, edge_vidx_base)\n edge2u = edge2u_v - tf.expand_dims(xyz, 2)\n edge2u = tf.expand_dims(edge2u, 3)\n edge2u_aug_v = TfUtil.gather_second_d(xyz_raw, edge_vidx_base_aug)\n edge2u_aug = edge2u_aug_v - tf.expand_dims(edge2u_v, 3)\n\n # the angle between edge and edge2u\n cosa = tf.reduce_sum(edge2u * edge2u_aug,-1)\n outside_mask = tf.cast(tf.greater(cosa, 
0), tf.int32)\n\n edge_vidx_base_aug_cleaned = (edge_vidx_base_aug+1) * outside_mask - 1\n\n neg_mask = tf.less(edge_vidx_base_aug_cleaned, 0)\n neg_num = tf.reduce_sum(tf.cast(neg_mask, tf.int32), -1)\n max_neg_num = tf.reduce_max(neg_num)\n min_neg_num = tf.reduce_min(neg_num)\n is_cut_neg = True\n # inner (neg) idx can be cut here to save later time or memory.\n # It can also be left cut later. If the neg rate is nor large, maybe leave\n # it later saves more time\n\n return edge_vidx_base_aug_cleaned\n\n @staticmethod\n def cut_neg_by_scatter(edge_vidx):\n neg_idx = tf.where(neg_mask)\n\n @staticmethod\n def clean_edgebase_in_next(edge_vidx_next, edge_vidx_base, valid_ev_num_next):\n '''\n Sometimes edge_vidx_base can not be filtered by set_inner_idx_neg\n '''\n assert tsize(edge_vidx_next) == tsize(edge_vidx_base) == 3\n mask0 = tf.expand_dims(edge_vidx_next, 3) - tf.expand_dims(edge_vidx_base, 2)\n mask0 = tf.equal(mask0, 0)\n mask1 = tf.cast(tf.reduce_any(mask0, -1), tf.int32)\n invalid_num = tf.reduce_sum(mask1, -1)\n edge_vidx_next_cleaned = edge_vidx_next *(1- mask1) - mask1\n edge_vidx_next_cleaned = tf.contrib.framework.sort(edge_vidx_next_cleaned, -1, direction='DESCENDING')\n valid_ev_num_next = valid_ev_num_next - invalid_num\n return edge_vidx_next_cleaned, valid_ev_num_next\n\n @staticmethod\n def replace_neg_by_first_or_last(edgev0, place, valid_ev_num=None):\n assert tsize(edgev0)==3\n with tf.variable_scope('replace_neg_by_self'):\n eshape0 = get_tensor_shape(edgev0)\n neg_mask = tf.less(edgev0, 0)\n any_neg = tf.reduce_any(neg_mask)\n def do_replace_by_first():\n neg_vidx = tf.cast(tf.where(neg_mask), tf.int32)\n if place == 'first':\n placement = tf.gather_nd(edgev0, neg_vidx*tf.constant([[1,1,0]]))\n elif place == 'last_valid':\n valid_ev_num_cycled = tf.minimum(valid_ev_num*2, eshape0[-1])\n last_valid_idx = tf.expand_dims(tf.gather_nd( valid_ev_num_cycled-1, neg_vidx[:,0:2]), -1)\n last_valid_idx = tf.concat([neg_vidx[:,0:2], 
last_valid_idx], -1)\n placement = tf.gather_nd(edgev0, last_valid_idx)\n tmp = tf.scatter_nd(neg_vidx, placement+1, eshape0)\n return edgev0 + tmp\n def no_op():\n return edgev0\n edgev1 = tf.cond(any_neg, do_replace_by_first, no_op)\n return edgev1\n\n @staticmethod\n def clean_duplicate_edgev(edge_aug_vidx):\n eshape0 = get_tensor_shape(edge_aug_vidx)\n assert len(eshape0) == 4\n batch_size, vn, evn1, evn2 = eshape0\n\n with tf.variable_scope('clean_duplicate_edgev'):\n # remove duplicated\n edgev_idx0 = tf.reshape(edge_aug_vidx, [batch_size, vn, -1])\n edgev_idx0 = tf.contrib.framework.sort(edgev_idx0, -1, direction='DESCENDING')\n tmp = edgev_idx0[:, :, 0:-1]\n dif_with_pre = tf.not_equal(edgev_idx0[:,:,1:], tmp)\n tmp = tf.constant(True, tf.bool, [batch_size, 1, 1])\n tmp = tf.tile(tmp, [1,vn,1])\n dif_with_pre = tf.concat([tmp, dif_with_pre], -1)\n edgev_idx0 = (1+edgev_idx0) * tf.cast(dif_with_pre, tf.int32) - 1\n edgev_idx0 = tf.contrib.framework.sort(edgev_idx0, -1, direction='DESCENDING')\n valid_ev_num = tf.reduce_sum(tf.cast(tf.greater(edgev_idx0, -1),tf.int32), -1)\n max_evn = tf.reduce_max(valid_ev_num)\n edge_vidx_next = edgev_idx0[:,:,0:max_evn]\n\n #mean_evn = tf.reduce_mean(valid_ev_num)\n #hist = tf.histogram_fixed_width(valid_ev_num, [1,max_evn+1], nbins=max_evn)\n #hist = tf.cast(hist, tf.float32)\n #hist_rates = hist / tf.reduce_sum(hist)\n #hist_rates = tf.cast(hist_rates * 100, tf.int32)\n #bins = tf.range(1,max_evn+1)\n return edge_vidx_next, valid_ev_num\n\n @staticmethod\n def get_zero_vec(norm):\n # norm: [batch_size, vn, 3]\n assert tsize(norm) == 4\n with tf.variable_scope('get_zero_vec'):\n x = tf.constant([1,0,0], tf.float32, [1,1,1,3])\n x_tan, costheta_x = EdgeVPath.project_vector(x, norm)\n use_x = tf.cast(tf.less(tf.abs(costheta_x), 0.7), tf.float32)\n\n y = tf.constant([0,1,0], tf.float32, [1,1,1,3])\n y_tan, costheta_y = EdgeVPath.project_vector(y, norm)\n\n zero_vec = x_tan * use_x + y_tan *(1-use_x)\n tmp = tf.norm(zero_vec, 
axis=-1, keepdims=True)\n min_project_norm = tf.reduce_min(tmp)\n zero_vec /= tmp\n\n nan_mask = tf.is_nan(zero_vec)\n nan_idx = tf.where(nan_mask)\n any_nan = tf.reduce_any(nan_mask)\n check1 = tf.assert_greater(min_project_norm, 0.6)\n check2 = tf.assert_equal(any_nan, False)\n with tf.control_dependencies([check1, check2]):\n zero_vec = tf.identity(zero_vec)\n return zero_vec\n\n\n @staticmethod\n def project_vector(edgev, norm):\n # not normalized\n assert tsize(edgev) == 4\n assert tsize(norm) == 4\n with tf.variable_scope('project_vector'):\n costheta = tf.reduce_sum(edgev * norm, -1, keepdims=True)\n edgev_tangent = edgev - costheta * norm\n return edgev_tangent, costheta\n\n @staticmethod\n def get_geodesic_angle(edgev_idx, xyz, norm, valid_ev_num=None, xyz_raw=None, geodis=None):\n assert tsize(edgev_idx) == 3\n assert tsize(norm) == 3\n batch_size, vertex_num, evn = get_tensor_shape(edgev_idx)\n\n with tf.variable_scope('geodesic_angle'):\n empty_mask = tf.cast(tf.less(edgev_idx, 0), tf.float32)\n edgev_idx = EdgeVPath.replace_neg_by_first_or_last(edgev_idx, 'first')\n if xyz_raw is None:\n xyz_raw = xyz\n edgev = TfUtil.gather_second_d(xyz_raw, edgev_idx) - tf.expand_dims(xyz,2)\n # (1) Project edgev to the tangent plane: edgev_tan\n # Project x to the tangent to get 0 angle vec\n norm = tf.expand_dims(norm,2)\n edgev_tan, _ = EdgeVPath.project_vector(edgev, norm)\n zero_vec = EdgeVPath.get_zero_vec(norm)\n\n # (2) Get cos of the angel\n ev_tan_norm = tf.norm(edgev_tan, axis=-1, keepdims=True)\n ev_norm = tf.norm(edgev, axis=-1, keepdims=True)\n\n check_manifold = True\n if check_manifold:\n valid_mask = TfUtil.valid_num_to_mask(valid_ev_num, evn)\n ev_norm_rate = ev_tan_norm / (ev_norm+1e-5)\n # For a manifold, ev_norm_rate shoul close to 1\n min_norm_rate = TfUtil.mask_reduce_min(tf.squeeze(ev_norm_rate,-1), valid_mask)\n # normally 0.98 for geodis=1; 0.97617507 for geodis=2;\n # 0.96850145 for geodis=3\n mean_norm_rate = 
TfUtil.mask_reduce_mean(tf.squeeze(ev_norm_rate,-1), valid_mask)\n check_manifold = tf.assert_greater(mean_norm_rate, 0.93, message=\"check manifold failed geodis={}\".format(geodis))\n with tf.control_dependencies([check_manifold]):\n ev_norm = tf.identity(ev_norm)\n\n edgev_tan_normed = edgev_tan / (ev_norm+1e-5)\n cos_angle = tf.reduce_sum(zero_vec * edgev_tan_normed, -1)\n\n # (3) Judge if the angle is over 180\n # the norm for each edgev\n edgev_norm = tf.cross(tf.tile(zero_vec,[1,1,evn,1]), edgev_tan_normed)\n #nan_idx = tf.where( tf.is_nan(edgev_norm))\n cos_nn = tf.reduce_sum(norm * edgev_norm, -1)\n over_pi = tf.cast(tf.less(cos_nn, 0), tf.float32)\n # (3.1)change sign if over pi. (3.2) minus 2 if over pi.\n sign = 1-over_pi*2\n # [-3,1]\n cos_angle = cos_angle * sign - 2*over_pi\n\n # (4) set cos angle for empty as -10\n cos_angle = cos_angle * (1-empty_mask) + empty_mask * (-20)\n\n # check nan\n any_nan = tf.reduce_any(tf.is_nan(cos_angle))\n check = tf.assert_equal(any_nan, False, message='cos angel nan')\n with tf.control_dependencies([check]):\n cos_angle = tf.identity(cos_angle)\n return cos_angle\n\n @staticmethod\n def expand_path(edge_vidx_base, valid_ev_num_base, edge_vidx_interval,\n valid_ev_num_interval, xyz, norm, xyz_raw=None, geodis=None, max_evnum_next=None):\n '''\n expand path from edge_vidx_base by edge_vidx_interval\n '''\n #print('\\nstart expand_path {}'.format(geodis))\n\n with_batch_dim = True\n if len(get_tensor_shape(edge_vidx_base)) == 2:\n with_batch_dim = False\n edge_vidx_base = tf.expand_dims(edge_vidx_base, 0)\n valid_ev_num_base = tf.expand_dims(valid_ev_num_base, 0)\n edge_vidx_interval = tf.expand_dims(edge_vidx_interval, 0)\n valid_ev_num_interval = tf.expand_dims(valid_ev_num_interval, 0)\n xyz = tf.expand_dims(xyz, 0)\n norm = tf.expand_dims(norm, 0)\n if xyz_raw:\n xyz_raw = tf.expand_dims(xyz_raw,0)\n assert tsize(edge_vidx_base) == tsize(edge_vidx_interval) == \\\n tsize(valid_ev_num_base) == 
tsize(valid_ev_num_interval) == \\\n tsize(xyz) == tsize(norm) == 3\n\n #***************************************************************************\n max_evn_base = tf.reduce_max(valid_ev_num_base)\n edge_vidx_base = edge_vidx_base[:,:,0:max_evn_base]\n\n max_evn_interval = tf.reduce_max(valid_ev_num_interval)\n edge_vidx_interval = edge_vidx_interval[:,:,0:max_evn_interval]\n\n with tf.variable_scope('expand_path_%s'%(geodis)):\n edge_vidx_base_aug = TfUtil.gather_second_d(edge_vidx_interval, edge_vidx_base)\n edge_vidx_base_aug = EdgeVPath.set_inner_idx_neg(edge_vidx_base_aug, edge_vidx_base, xyz, xyz_raw)\n edge_vidx_next, valid_ev_num_next = EdgeVPath.sort_edgev_by_angle(edge_vidx_base_aug,\n xyz, norm, xyz_raw, cycle_idx=True, max_evnum_next=max_evnum_next, edge_vidx_base=edge_vidx_base, geodis=geodis)\n\n #***************************************************************************\n is_ply = False\n if is_ply:\n EdgeVPath.edge_vidx_ply(edge_vidx_next[0], xyz_raw[0], valid_ev_num_next[0], geodis)\n if geodis==2:\n EdgeVPath.edge_vidx_ply(edge_vidx_base[0], xyz_raw[0], valid_ev_num_base[0], 1)\n\n if not with_batch_dim:\n edge_vidx_next = tf.squeeze(edge_vidx_next, 0)\n valid_ev_num_next = tf.squeeze(valid_ev_num_next, 0)\n xyz = tf.squeeze(xyz, 0)\n\n return edge_vidx_next, valid_ev_num_next\n\n @staticmethod\n def random_down_sp_idx(org_num, batch_size, sp_rate):\n aim_num = tf.cast(tf.ceil(org_num * sp_rate), tf.int32)\n sp_idx_ls = []\n for i in range(batch_size):\n sp_idx_i = tf.random_shuffle(tf.range(org_num))[0:aim_num]\n sp_idx_ls.append( tf.expand_dims(sp_idx_i, 0) )\n sp_idx = tf.concat(sp_idx_ls, 0)\n return sp_idx\n\n @staticmethod\n def down_sample_edgevidx(sp_rate, edge_vidx, valid_ev_num, xyz, norm):\n assert tsize(edge_vidx) == 3\n batch_size, org_num, evn = TfUtil.get_tensor_shape(edge_vidx)\n sp_idx = EdgeVPath.random_down_sp_idx(org_num, batch_size, sp_rate)\n #sp_idx = tf.contrib.framework.sort(sp_idx, -1, direction='ASCENDING')\n\n 
#sp_idx = tf.reshape(tf.range(180000,180000+10), [1,-1])\n\n edge_vidx_dsp = TfUtil.gather_second_d(edge_vidx, sp_idx)\n valid_ev_num_dsp = TfUtil.gather_second_d(valid_ev_num, sp_idx)\n norm_dsp = TfUtil.gather_second_d(norm, sp_idx)\n xyz_dsp = TfUtil.gather_second_d(xyz, sp_idx)\n return edge_vidx_dsp, valid_ev_num_dsp, xyz_dsp, norm_dsp, sp_idx\n\n @staticmethod\n def main_test_expand_path(edge1u_vidx, valid_1uev_num, xyz, norm):\n with_batch_dim = True\n if TfUtil.tsize(edge1u_vidx) == 2:\n with_batch_dim = False\n edge1u_vidx = tf.expand_dims(edge1u_vidx, 0)\n valid_1uev_num = tf.expand_dims(valid_1uev_num, 0)\n xyz = tf.expand_dims(xyz, 0)\n norm = tf.expand_dims(norm, 0)\n\n xyz_raw = xyz\n edge1u_vidx_raw = edge1u_vidx\n valid_1uev_num_raw = valid_1uev_num\n sp_rate = 0.01\n\n #-------------------------------\n if sp_rate<1.0:\n edge1u_vidx, valid_1uev_num, xyz, norm, sp_idx_1u = \\\n EdgeVPath.down_sample_edgevidx(sp_rate, edge1u_vidx, valid_1uev_num, xyz, norm)\n\n edge2u_vidx, valid_2uev_num = EdgeVPath.expand_path(edge1u_vidx, valid_1uev_num,\n edge1u_vidx_raw, valid_1uev_num_raw, xyz, norm, xyz_raw, geodis=2, max_evnum_next=28)\n\n #-------------------------------\n sp_rate = 1.01\n if sp_rate<1.0:\n edge2u_vidx, valid_2uev_num, xyz, norm, sp_idx_2u = \\\n EdgeVPath.down_sample_edgevidx(sp_rate, edge2u_vidx, valid_2uev_num, xyz, norm)\n\n edge3u_vidx, valid_3uev_num = EdgeVPath.expand_path(edge2u_vidx, valid_2uev_num,\n edge1u_vidx_raw, valid_1uev_num_raw, xyz, norm, xyz_raw, geodis=3, max_evnum_next=50)\n #-------------------------------\n if sp_rate<1.0:\n edge3u_vidx, valid_3uev_num, xyz, norm, sp_idx_3u = \\\n EdgeVPath.down_sample_edgevidx(sp_rate, edge3u_vidx, valid_3uev_num, xyz, norm)\n\n edge4u_vidx, valid_4uev_num = EdgeVPath.expand_path(edge3u_vidx, valid_3uev_num,\n edge1u_vidx_raw, valid_1uev_num_raw, xyz, norm, xyz_raw, geodis=4, max_evnum_next=60)\n print('expand path test ok')\n\n @staticmethod\n def edge_vidx_ply(edgev_idx, xyz, 
valid_ev_num, geodis):\n assert tsize(edgev_idx) == 2\n print('start gen edgev_idx ply as geodis={}'.format(geodis))\n xyz = xyz.numpy()\n edgev_idx = edgev_idx.numpy()\n\n is_downsp = False\n\n if is_downsp:\n vn, evn = TfUtil.get_tensor_shape(edgev_idx)\n np.random.seed(0)\n sample_idx = np.random.choice(vn, 500, False)\n edgev_idx = np.take(edgev_idx, sample_idx, axis=0)\n\n ply_fn = '/home/z/Desktop/plys/edgev_{}.ply'.format(geodis)\n edges = ply_util.closed_path_to_edges(edgev_idx, valid_ev_num)\n ply_util.gen_mesh_ply(ply_fn, xyz, edges)\n\n\nclass VertexDecimation():\n _sp_w_norm = 0.2\n @staticmethod\n def down_sampling_mesh(_num_vertex_sp, raw_datas, mesh_summary):\n with_batch_dim = True\n if TfUtil.tsize(raw_datas['xyz']) == 2:\n with_batch_dim = False\n for item in raw_datas:\n raw_datas[item] = tf.expand_dims(raw_datas[item], 0)\n\n down_sp_method = 'prefer_smooth'\n num_vertex0 = TfUtil.get_tensor_shape(raw_datas['xyz'])[1]\n if down_sp_method == 'prefer_smooth':\n smooth_factor = VertexDecimation.get_smooth_perv_raw(raw_datas['xyz'],\n raw_datas['nxnynz'], raw_datas['edgev_per_vertex'], raw_datas['valid_ev_num_pv'])\n vertex_spidx, vertex_rm_idx, sp_smooth_loss = VertexDecimation.smooth_sampling(smooth_factor, _num_vertex_sp)\n #GenPlys.gen_mesh_ply_basic(raw_datas, 'vertex_decimation', 'nw_%d'%(10*VertexDecimation._sp_w_norm),\n # vertex_spidx=vertex_spidx, vertex_rm_idx=vertex_rm_idx)\n\n elif down_sp_method == 'random':\n vertex_spidx = MeshSampling.down_sampling_vertex_random(\n num_vertex0, _num_vertex_sp)\n\n raise NotImplementedError\n VertexDecimation.update_face(vertex_spidx, vertex_rm_idx, raw_datas['vidx_per_face'], num_vertex0)\n import pdb; pdb.set_trace() # XXX BREAKPOINT\n\n face_sp_indices, vidx_per_face_new, edgev_per_vertex_new, valid_ev_num_pv_new =\\\n VertexDecimation.update_face_edgev(\n vertex_spidx, num_vertex0, raw_datas['vidx_per_face'],\n raw_datas['edgev_per_vertex'], raw_datas['valid_ev_num_pv'], xyz=raw_datas['xyz'],\n 
mesh_summary=mesh_summary)\n raw_datas = MeshSampling.gather_datas(raw_datas, vertex_spidx,\n face_sp_indices, vidx_per_face_new,\n edgev_per_vertex_new, valid_ev_num_pv_new)\n\n\n @staticmethod\n def get_smooth_perv_raw(xyz, norm, edgev_per_vertex, valid_ev_num_pv):\n xyz_smooth = VertexDecimation.get_smooth_perv(xyz, edgev_per_vertex, valid_ev_num_pv)\n norm_smooth = VertexDecimation.get_smooth_perv(norm, edgev_per_vertex, valid_ev_num_pv)\n norm_w = VertexDecimation._sp_w_norm\n smooth_factor = norm_smooth * norm_w + xyz_smooth * (1-norm_w)\n return smooth_factor\n\n @staticmethod\n def smooth_sampling(smooth_factor, _num_vertex_sp):\n sort_idx = tf.contrib.framework.argsort(smooth_factor, axis=-1, direction='DESCENDING')\n sp_idx = sort_idx[:,0:_num_vertex_sp]\n rm_idx = sort_idx[:,_num_vertex_sp:]\n\n rm_smoothf = TfUtil.gather_second_d(smooth_factor, rm_idx)\n sp_smooth_loss = tf.reduce_mean(rm_smoothf,-1)\n return sp_idx, rm_idx, sp_smooth_loss\n\n @staticmethod\n def get_smooth_perv(features, edgev_per_vertex, valid_ev_num_pv):\n '''\n Smaller mean smoother\n '''\n assert TfUtil.tsize(features) == TfUtil.tsize(edgev_per_vertex) == 3\n max_evn = tf.reduce_max(valid_ev_num_pv)\n mean_evn = tf.cast( tf.reduce_mean(valid_ev_num_pv), tf.float32)\n evn = tf.minimum(max_evn, tf.cast(mean_evn*1.6, tf.int32))\n edgev_per_vertex = edgev_per_vertex[:,:,0:evn]\n\n features_aug = TfUtil.gather_second_d(features, edgev_per_vertex)\n mean_f = tf.reduce_mean(features_aug, -2)\n mean_err = tf.norm( mean_f - features, axis=-1)\n tmp = tf.reduce_mean(mean_err, axis=-1, keepdims=True)\n smooth = mean_err / tmp\n\n return smooth\n\n\n @staticmethod\n def get_raw_vidx_2_sp_vidx(vertex_spidx, num_vertex0):\n assert len(get_tensor_shape(vertex_spidx)) == 2\n batch_size, _num_vertex_sp = TfUtil.get_tensor_shape(vertex_spidx)\n batch_idx = tf.tile(tf.reshape(tf.range(batch_size), [-1,1,1]), [1,_num_vertex_sp,1])\n vertex_spidx = tf.expand_dims(tf.cast(vertex_spidx, tf.int32),-1)\n 
vertex_spidx = tf.concat([batch_idx, vertex_spidx], -1)\n\n new_vidx = tf.tile(tf.reshape(tf.range(_num_vertex_sp),[1,-1]), [batch_size,1])\n raw_vidx_2_sp_vidx = tf.scatter_nd(vertex_spidx, new_vidx+1, [batch_size, num_vertex0])-1\n return raw_vidx_2_sp_vidx\n\n @staticmethod\n def update_edgev(vertex_spidx, vertex_rm_idx, edgev_per_vertex, num_vertex0):\n pass\n\n @staticmethod\n def update_face(vertex_spidx, vertex_rm_idx, vidx_per_face, num_vertex0):\n raw_vidx_2_sp_vidx = VertexDecimation.get_raw_vidx_2_sp_vidx(vertex_spidx, num_vertex0)\n\n edgev_rmv = TfUtil.gather_second_d()\n import pdb; pdb.set_trace() # XXX BREAKPOINT\n pass\n\n\n @staticmethod\n def update_face_edgev(vertex_spidx, num_vertex0, vidx_per_face, edgev_per_vertex,\n valid_ev_num_pv, xyz, mesh_summary):\n edgev_per_vertex_new3, valid_ev_num_pv_new, raw_edgev_spvidx = MeshSampling.rich_edges(vertex_spidx,\\\n edgev_per_vertex, xyz, raw_vidx_2_sp_vidx, valid_ev_num_pv, mesh_summary)\n import pdb; pdb.set_trace() # XXX BREAKPOINT\n face_sp_indices, vidx_per_face_new, raw_vidx_2_sp_vidx = MeshSampling.down_sampling_face(\\\n vertex_spidx, num_vertex0, vidx_per_face, False)\n return face_sp_indices, vidx_per_face_new, edgev_per_vertex_new3, valid_ev_num_pv_new\n\n return raw_datas\n\n @staticmethod\n def contract_vertex_pairs(vertex_spidx, edgev_per_vertex, xyz, raw_vidx_2_sp_vidx,\n valid_ev_num_pv, mesh_summary={}, max_fail_2unit_ev_rate=None, scale=None):\n assert len(get_tensor_shape(vertex_spidx)) == 1\n assert len(get_tensor_shape(edgev_per_vertex)) == len(get_tensor_shape(xyz)) == 2\n pass\n\n\nclass GenPlys():\n @staticmethod\n def gen_mesh_ply_basic(datas, dir_name='', base_name='', ply_dir=None, gen_edgev=False,\n vertex_spidx=None, vertex_rm_idx=None):\n if ply_dir == None:\n ply_dir = '/tmp/plys'\n path = '{}/{}'.format(ply_dir, dir_name)\n if base_name=='':\n base_name = '1'\n for item in datas:\n if isinstance(datas[item], tf.Tensor):\n datas[item] = datas[item].numpy()\n if 
datas[item].ndim == 3:\n datas[item] = datas[item][0]\n\n if vertex_spidx is not None:\n if isinstance(vertex_spidx, tf.Tensor):\n vertex_spidx = vertex_spidx.numpy()\n if vertex_spidx.ndim == 2:\n vertex_spidx = vertex_spidx[0]\n\n # **************\n if gen_edgev:\n ply_fn = '{}/edgev_{}.ply'.format(path, base_name)\n down_sample_rate = 1e-1\n if 'edgev_per_vertex' in datas:\n edgev_per_vertex = datas['edgev_per_vertex']\n edgev_vidx_per_face = MeshSampling.edgev_to_face(edgev_per_vertex, datas['valid_ev_num_pv'])\n\n ply_util.gen_mesh_ply(ply_fn, datas['xyz'], edgev_vidx_per_face,\n vertex_color=datas['color'])\n\n # **************\n num_vertex0 = datas['xyz'].shape[0]\n if vertex_spidx is not None:\n sp_xyz = np.take(datas['xyz'], vertex_spidx, axis=0)\n rm_xyz = np.take(datas['xyz'], vertex_rm_idx, axis=0)\n ply_fn = '{}/sp_{}.ply'.format(path, base_name)\n ply_util.create_ply(sp_xyz, ply_fn)\n ply_fn = '{}/rm_{}.ply'.format(path, base_name)\n ply_util.create_ply(rm_xyz, ply_fn)\n\n\n # **************\n ply_fn = '{}/{}.ply'.format(path, base_name)\n label_category = datas['label_category']\n if 'vidx_per_face' in datas:\n ply_util.gen_mesh_ply(ply_fn, datas['xyz'], datas['vidx_per_face'],\n face_label=label_category)\n else:\n ply_util.create_ply(datas['xyz'], ply_fn)\n\nif __name__ == '__main__':\n dataset_name = 'MATTERPORT'\n dset_path = '/DS/Matterport3D/Matterport3D_WHOLE_extracted/v1/scans'\n tfrecord_path = '/DS/Matterport3D/MATTERPORT_TF/mesh_tfrecord_555'\n tfrecord_path = '/home/z/Research/SparseVoxelNet/data/MATTERPORT_TF/mesh_tfrecord'\n read_tfrecord(dataset_name, tfrecord_path)\n\n\n\n"
] |
[
[
"numpy.array",
"numpy.zeros"
],
[
"tensorflow.is_nan",
"tensorflow.cond",
"tensorflow.device",
"tensorflow.enable_eager_execution",
"tensorflow.concat",
"numpy.take",
"tensorflow.control_dependencies",
"tensorflow.zeros",
"tensorflow.reduce_sum",
"tensorflow.equal",
"tensorflow.cast",
"tensorflow.minimum",
"tensorflow.abs",
"tensorflow.where",
"tensorflow.sparse_add",
"tensorflow.argmin",
"tensorflow.cumsum",
"tensorflow.assert_greater",
"tensorflow.Graph",
"tensorflow.while_loop",
"tensorflow.greater",
"tensorflow.logical_or",
"tensorflow.squeeze",
"tensorflow.contrib.framework.argsort",
"tensorflow.assert_less",
"tensorflow.ConfigProto",
"tensorflow.gather",
"tensorflow.nn.top_k",
"tensorflow.unique_with_counts",
"tensorflow.contrib.framework.sort",
"tensorflow.Session",
"tensorflow.ceil",
"tensorflow.assert_greater_equal",
"tensorflow.logical_not",
"tensorflow.tile",
"tensorflow.reverse",
"tensorflow.norm",
"tensorflow.Print",
"tensorflow.gather_nd",
"tensorflow.unique",
"tensorflow.less",
"tensorflow.reduce_any",
"tensorflow.shape",
"numpy.random.choice",
"tensorflow.identity",
"tensorflow.scatter_nd",
"tensorflow.assert_equal",
"tensorflow.placeholder",
"tensorflow.not_equal",
"tensorflow.reduce_max",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.reduce_mean",
"numpy.random.seed",
"tensorflow.maximum",
"tensorflow.reshape",
"tensorflow.timestamp",
"tensorflow.expand_dims",
"tensorflow.ones",
"numpy.cos",
"tensorflow.mod",
"tensorflow.reduce_min",
"tensorflow.variable_scope",
"tensorflow.greater_equal",
"tensorflow.reduce_all",
"tensorflow.expand",
"tensorflow.logical_and"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"2.7",
"1.12",
"2.6",
"2.2",
"1.13",
"2.3",
"2.4",
"1.4",
"2.9",
"1.5",
"1.7",
"2.5",
"0.12",
"1.0",
"2.8",
"1.2",
"2.10"
]
}
] |
kutao207/SiamGCN
|
[
"d53a5d9b91428cc51811803abab1877791af35d3"
] |
[
"main_pointnet.py"
] |
[
"import os\nimport os.path as osp\n\nimport argparse\nfrom datetime import datetime\n\nimport torch \nimport torch.nn.functional as F\nfrom torch.nn import Sequential as Seq, Linear as Lin, ReLU, BatchNorm1d as BN, Dropout\nfrom torch.optim.lr_scheduler import StepLR\n\nimport torch_geometric.transforms as T\nfrom torch_geometric.data import DataLoader\nfrom torch_geometric.nn import PointConv, fps, radius, global_max_pool\n\nfrom change_dataset import ChangeDataset, MyDataLoader\nfrom transforms import NormalizeScale, SamplePoints\nfrom metric import ConfusionMatrix\n\nfrom focal_loss import focal_loss\nfrom contrastive_loss import ContrastiveLoss\n\nfrom imbalanced_sampler import ImbalancedDatasetSampler\n\nfrom pointnet2 import SAModule, GlobalSAModule, MLP\n\nfrom utils import ktprint, set_logger, check_dirs\n\n#### log file setting\n\ncur_filename = osp.splitext(osp.basename(__file__))[0]\nlog_dir = 'logs'\ncheck_dirs(log_dir)\nlog_filename = osp.join(log_dir, '{}_{date:%Y-%m-%d_%H_%M_%S}'.format(cur_filename, date=datetime.now())+'.logging')\nset_logger(log_filename)\nprint = ktprint\n#### log file setting finished!\n\n\n# 0 1 2 3 4\n# [\"nochange\",\"removed\",\"added\",\"change\",\"color_change\"]\n\nNUM_CLASS = 5\nUSING_FOCAL_LOSS = False\nUSING_IMBALANCE_SAMPLING = True\nUSING_CONTRASTIVE_LOSS = False\n\nclass Net_cas(torch.nn.Module):\n def __init__(self) -> None:\n super().__init__()\n input_feature_dim = 6\n self.sa1_module = SAModule(0.5, 0.2, MLP([input_feature_dim, 64, 64, 128]))\n self.sa1_global_module = GlobalSAModule(MLP([128+3, 256, 512, 1024]))\n self.sa2_module = SAModule(0.25, 0.4, MLP([128 + 3, 128, 128, 256])) \n self.sa3_module = GlobalSAModule(MLP([256 + 3, 256, 512, 1024]))\n\n self.lin1 = Lin(1024 * 2, 512)\n self.lin2 = Lin(512, 256)\n self.lin3 = Lin(256, NUM_CLASS)\n\n def forward(self, data):\n sa0_b1_input = (data.x[:,3:], data.x[:,:3], data.batch)\n sa0_b2_input = (data.x2[:,3:], data.x2[:,:3], data.batch2)\n\n sa1_b1_out = 
self.sa1_module(*sa0_b1_input)\n sa2_b1_out = self.sa2_module(*sa1_b1_out)\n sa3_b1_out = self.sa3_module(*sa2_b1_out)\n\n sa1_b1_global_out = self.sa1_global_module(*sa1_b1_out)\n\n sa1_b2_out = self.sa1_module(*sa0_b2_input)\n sa2_b2_out = self.sa2_module(*sa1_b2_out)\n sa3_b2_out = self.sa3_module(*sa2_b2_out)\n\n sa1_b2_global_out = self.sa1_global_module(*sa1_b2_out)\n\n # x1, pos1, _ = sa3_b1_out\n # x2, pos2, _ = sa3_b2_out\n\n x1 = torch.cat((sa1_b1_global_out[0], sa3_b1_out[0]), -1)\n x2 = torch.cat((sa1_b2_global_out[0], sa3_b2_out[0]), -1)\n\n x = x1 + x2\n\n x = F.relu(self.lin1(x))\n x = F.dropout(x, p=0.5, training=self.training)\n x = F.relu(self.lin2(x))\n x = F.dropout(x, p=0.5, training=self.training)\n x = self.lin3(x)\n return F.log_softmax(x, dim=-1)\n\n\nclass Net(torch.nn.Module):\n def __init__(self):\n super().__init__()\n input_feature_dim = 6\n self.sa1_module = SAModule(0.5, 0.2, MLP([input_feature_dim, 64, 64, 128]))\n self.sa2_module = SAModule(0.25, 0.4, MLP([128 + 3, 128, 128, 256]))\n self.sa3_module = GlobalSAModule(MLP([256 + 3, 256, 512, 1024]))\n\n self.lin1 = Lin(1024, 512)\n self.lin2 = Lin(512, 256)\n self.lin3 = Lin(256, NUM_CLASS)\n\n def forward(self, data):\n sa0_b1_input = (data.x[:,3:], data.x[:,:3], data.batch)\n sa0_b2_input = (data.x2[:,3:], data.x2[:,:3], data.batch2)\n\n sa1_b1_out = self.sa1_module(*sa0_b1_input)\n sa2_b1_out = self.sa2_module(*sa1_b1_out)\n sa3_b1_out = self.sa3_module(*sa2_b1_out)\n\n sa1_b2_out = self.sa1_module(*sa0_b2_input)\n sa2_b2_out = self.sa2_module(*sa1_b2_out)\n sa3_b2_out = self.sa3_module(*sa2_b2_out)\n\n x1, pos1, _ = sa3_b1_out\n x2, pos2, _ = sa3_b2_out\n\n x = x1 + x2\n\n x = F.relu(self.lin1(x))\n x = F.dropout(x, p=0.5, training=self.training)\n x = F.relu(self.lin2(x))\n x = F.dropout(x, p=0.5, training=self.training)\n x = self.lin3(x)\n return F.log_softmax(x, dim=-1)\n\nclass Net_con(torch.nn.Module):\n def __init__(self):\n super().__init__()\n input_feature_dim = 6\n 
self.sa1_module = SAModule(0.5, 0.2, MLP([input_feature_dim, 64, 64, 128]))\n self.sa2_module = SAModule(0.25, 0.4, MLP([128 + 3, 128, 128, 256]))\n self.sa3_module = GlobalSAModule(MLP([256 + 3, 256, 512, 1024]))\n\n # self.lin1 = Lin(1024, 512)\n # self.lin2 = Lin(512, 256)\n # self.lin3 = Lin(256, NUM_CLASS)\n\n self.lin = Seq(\n Lin(1024, 512), ReLU(), Dropout(p=0.5),\n Lin(512, 256), ReLU(), Dropout(p=0.5) \n )\n self.lin_1 = Lin(256, NUM_CLASS)\n self.lin_2 = Lin(256, NUM_CLASS)\n\n def forward(self, data):\n sa0_b1_input = (data.x[:,3:], data.x[:,:3], data.batch)\n sa0_b2_input = (data.x2[:,3:], data.x2[:,:3], data.batch2)\n\n sa1_b1_out = self.sa1_module(*sa0_b1_input)\n sa2_b1_out = self.sa2_module(*sa1_b1_out)\n sa3_b1_out = self.sa3_module(*sa2_b1_out)\n\n sa1_b2_out = self.sa1_module(*sa0_b2_input)\n sa2_b2_out = self.sa2_module(*sa1_b2_out)\n sa3_b2_out = self.sa3_module(*sa2_b2_out)\n\n x1, pos1, _ = sa3_b1_out\n x2, pos2, _ = sa3_b2_out\n\n # x = x1 + x2\n\n # x = F.relu(self.lin1(x))\n # x = F.dropout(x, p=0.5, training=self.training)\n # x = F.relu(self.lin2(x))\n # x = F.dropout(x, p=0.5, training=self.training)\n # x = self.lin3(x)\n # return F.log_softmax(x, dim=-1)\n x1 = self.lin(x1)\n x2 = self.lin(x2)\n x1_out = F.log_softmax(self.lin_1(x1), dim=-1)\n x2_out = F.log_softmax(self.lin_2(x2), dim=-1)\n\n return (x1_out, x2_out)\n\nclass Net_2(torch.nn.Module):\n def __init__(self):\n super().__init__()\n input_feature_dim = 6\n self.sa1_module = SAModule(0.5, 0.2, MLP([input_feature_dim, 64, 64, 128]))\n # self.sa2_module = SAModule(0.25, 0.4, MLP([128 + 3, 128, 128, 256]))\n # self.sa3_module = GlobalSAModule(MLP([256 + 3, 256, 512, 1024]))\n self.sa3_module = GlobalSAModule(MLP([128 + 3, 256, 512, 1024]))\n\n # self.lin1 = Lin(1024, 512)\n # self.lin2 = Lin(512, 256)\n # self.lin3 = Lin(256, NUM_CLASS)\n\n # self.lin = Seq(\n # Lin(1024, 512), ReLU(), Dropout(p=0.5),\n # Lin(512, 256), ReLU(), Dropout(p=0.5) \n # )\n # self.lin_1 = Lin(256, 
NUM_CLASS)\n # self.lin_2 = Lin(256, NUM_CLASS)\n\n self.lin = Seq(\n Lin(1024, 256), ReLU(), Dropout(p=0.5) \n )\n self.lin_1 = Lin(256, 128)\n self.lin_2 = Lin(256, 128)\n\n self.lin_last = Lin(128, NUM_CLASS)\n\n def forward(self, data):\n sa0_b1_input = (data.x[:,3:], data.x[:,:3], data.batch)\n sa0_b2_input = (data.x2[:,3:], data.x2[:,:3], data.batch2)\n\n # Using less layers\n\n # sa1_b1_out = self.sa1_module(*sa0_b1_input)\n # sa2_b1_out = self.sa2_module(*sa1_b1_out)\n # sa3_b1_out = self.sa3_module(*sa2_b1_out)\n\n # sa1_b2_out = self.sa1_module(*sa0_b2_input)\n # sa2_b2_out = self.sa2_module(*sa1_b2_out)\n # sa3_b2_out = self.sa3_module(*sa2_b2_out)\n\n sa1_b1_out = self.sa1_module(*sa0_b1_input) \n sa3_b1_out = self.sa3_module(*sa1_b1_out)\n\n sa1_b2_out = self.sa1_module(*sa0_b2_input) \n sa3_b2_out = self.sa3_module(*sa1_b2_out)\n\n x1, pos1, _ = sa3_b1_out\n x2, pos2, _ = sa3_b2_out\n\n x = x1 - x2\n\n x = self.lin(x)\n x1, x2 = self.lin_1(x), self.lin_2(x)\n x_out = F.dropout(F.relu(x1-x2), p=0.6)\n\n x_out = self.lin_last(x_out)\n\n return F.log_softmax(x, dim=-1)\n\n # x = x1 + x2\n\n # x = F.relu(self.lin1(x))\n # x = F.dropout(x, p=0.5, training=self.training)\n # x = F.relu(self.lin2(x))\n # x = F.dropout(x, p=0.5, training=self.training)\n # x = self.lin3(x)\n # return F.log_softmax(x, dim=-1)\n\n # x1 = self.lin(x1)\n # x2 = self.lin(x2)\n # x1_out = F.log_softmax(self.lin_1(x1), dim=-1)\n # x2_out = F.log_softmax(self.lin_2(x2), dim=-1)\n\n # return (x1_out, x2_out)\n\nclass Net_3(torch.nn.Module):\n def __init__(self):\n super().__init__()\n input_feature_dim = 6\n self.sa1_module = SAModule(0.5, 0.2, MLP([input_feature_dim, 64, 64, 128]))\n # self.sa2_module = SAModule(0.25, 0.4, MLP([128 + 3, 128, 128, 256]))\n # self.sa3_module = GlobalSAModule(MLP([256 + 3, 256, 512, 1024]))\n self.sa3_module = GlobalSAModule(MLP([128 + 3, 256, 512, 1024]))\n\n # self.lin1 = Lin(1024, 512)\n # self.lin2 = Lin(512, 256)\n # self.lin3 = Lin(256, 
NUM_CLASS)\n\n # self.lin = Seq(\n # Lin(1024, 512), ReLU(), Dropout(p=0.5),\n # Lin(512, 256), ReLU(), Dropout(p=0.5) \n # )\n # self.lin_1 = Lin(256, NUM_CLASS)\n # self.lin_2 = Lin(256, NUM_CLASS)\n\n self.lin = Seq(\n Lin(1024, 256), ReLU(), Dropout(p=0.5) \n )\n self.lin_1 = Lin(256, 128)\n self.lin_2 = Lin(256, 128)\n\n self.lin_last = Lin(128, NUM_CLASS)\n\n def forward(self, data):\n sa0_b1_input = (data.x[:,3:], data.x[:,:3], data.batch)\n sa0_b2_input = (data.x2[:,3:], data.x2[:,:3], data.batch2)\n\n # Using less layers\n\n # sa1_b1_out = self.sa1_module(*sa0_b1_input)\n # sa2_b1_out = self.sa2_module(*sa1_b1_out)\n # sa3_b1_out = self.sa3_module(*sa2_b1_out)\n\n # sa1_b2_out = self.sa1_module(*sa0_b2_input)\n # sa2_b2_out = self.sa2_module(*sa1_b2_out)\n # sa3_b2_out = self.sa3_module(*sa2_b2_out)\n\n sa1_b1_out = self.sa1_module(*sa0_b1_input) \n sa3_b1_out = self.sa3_module(*sa1_b1_out)\n\n sa1_b2_out = self.sa1_module(*sa0_b2_input) \n sa3_b2_out = self.sa3_module(*sa1_b2_out)\n\n x1, pos1, _ = sa3_b1_out\n x2, pos2, _ = sa3_b2_out\n\n x = x1 - x2\n\n x = self.lin(x)\n x1, x2 = self.lin_1(x), self.lin_2(x)\n x_out = F.dropout(F.relu(x1-x2), p=0.6)\n\n x_out = self.lin_last(x_out)\n\n return F.log_softmax(x_out, dim=-1)\n\ndef train(epoch):\n model.train()\n confusion_matrix = ConfusionMatrix(NUM_CLASS+1)\n \n\n if True:\n correct = 0\n # i = 0\n for data in train_loader:\n data = data.to(device)\n optimizer.zero_grad()\n \n if True:\n # print(f\"Iter {i}\")\n # i += 1\n out = model(data)\n\n # if USING_FOCAL_LOSS:\n # loss = focal_loss(out, data.y, alpha=0.5, reduction='mean')\n if USING_CONTRASTIVE_LOSS:\n loss = criterion(out[0], out[1], data.y)\n else:\n loss = F.nll_loss(out, data.y)\n pred = out.max(1)[1]\n correct += pred.eq(data.y).sum().item()\n \n confusion_matrix.increment_from_list(data.y.cpu().detach().numpy() + 1, pred.cpu().detach().numpy() + 1)\n else:\n if USING_FOCAL_LOSS:\n loss = focal_loss(model(data), data.y, alpha=0.5, 
reduction='mean')\n else:\n loss = F.nll_loss(model(data), data.y)\n \n loss.backward()\n optimizer.step()\n \n train_acc = correct / len(train_loader.dataset)\n print('Epoch: {:03d}, Train: {:.4f}, per_class_acc: {}'.format(epoch, train_acc, confusion_matrix.get_per_class_accuracy()))\n\ndef test(loader):\n model.eval()\n confusion_matrix = ConfusionMatrix(NUM_CLASS+1)\n correct = 0\n for data in loader:\n data = data.to(device)\n with torch.no_grad():\n pred = model(data).max(1)[1]\n correct += pred.eq(data.y).sum().item()\n confusion_matrix.increment_from_list(data.y.cpu().detach().numpy() + 1, pred.cpu().detach().numpy() + 1)\n \n test_acc = correct / len(loader.dataset)\n print('Epoch: {:03d}, Test: {:.4f}, per_class_acc: {}'.format(epoch, test_acc, confusion_matrix.get_per_class_accuracy()))\n return test_acc, confusion_matrix.get_per_class_accuracy()\n\ndef inference(loader, path='best_pointnet_model.pth'):\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n model = Net().to(device)\n\n model.load_state_dict(torch.load(path))\n model.eval()\n\n correct = 0\n for data in loader:\n data = data.to(device)\n with torch.no_grad():\n pred = model(data).max(1)[1]\n correct += pred.eq(data.y).sum().item()\n return correct / len(loader.dataset)\n\n\nif __name__ == '__main__':\n \n path = '/home/kt/cyclomedia_disk/kt/shrec2021/data'\n\n if os.name == 'nt':\n path = 'F:/shrec2021/data' \n\n checkpoint_dir = 'checkpoints_main_pointnet'\n\n ignore_labels = [] # ['nochange']\n\n if USING_FOCAL_LOSS:\n print(\"Using focal loss!\")\n if USING_IMBALANCE_SAMPLING:\n print(\"Using imbalance over sampling!\")\n if USING_CONTRASTIVE_LOSS:\n print(\"Using contrastive loss!\")\n\n pre_transform, transform = NormalizeScale(), SamplePoints(1024)\n \n\n # train_dataset = ChangeDataset(path, train=True, clearance=3, transform=None, pre_transform=None)\n # train_dataset = ChangeDataset(path, train=True, clearance=3, transform=None, pre_transform=None)\n 
train_dataset = ChangeDataset(path, train=True, clearance=3, ignore_labels=ignore_labels, transform=transform, pre_transform=pre_transform)\n test_dataset = ChangeDataset(path, train=False, clearance=3, ignore_labels=ignore_labels, transform=transform, pre_transform=pre_transform)\n\n NUM_CLASS = len(train_dataset.class_labels)\n\n sampler = ImbalancedDatasetSampler(train_dataset)\n\n if USING_CONTRASTIVE_LOSS:\n criterion = ContrastiveLoss()\n\n # train_loader = DataLoader(train_dataset, batch_size=4, shuffle=False, num_workers=0)\n\n if not USING_IMBALANCE_SAMPLING:\n train_loader = MyDataLoader(train_dataset, batch_size=8, shuffle=True, num_workers=4)\n else:\n train_loader = MyDataLoader(train_dataset, batch_size=8, shuffle=False, num_workers=4, sampler=sampler)\n \n test_loader = MyDataLoader(test_dataset, batch_size=8, shuffle=False, num_workers=4)\n\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n if USING_CONTRASTIVE_LOSS:\n model = Net_con().to(device)\n else:\n model = Net_2().to(device)\n print(\"Using Net -> Net_2()\")\n # model = Net_cas().to(device)\n optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n\n scheduler = StepLR(optimizer, step_size=15, gamma=0.1)\n\n test_accs = []\n max_acc = 0\n epoch_best = 1\n for epoch in range(1, 201):\n train(epoch)\n test_acc, per_cls_acc = test(train_loader)\n\n scheduler.step()\n\n if test_acc > max_acc:\n torch.save(model.state_dict(), f'best_pointnet_model_{model.__class__.__name__}.pth')\n max_acc = test_acc\n epoch_best = epoch\n \n print('Epoch: {:03d}, get best acc: {:.4f}, per class acc: {}'.format(epoch_best, test_acc, per_cls_acc))\n \n\n # print(\"Breakpoint\")\n\n\n\n\n"
] |
[
[
"torch.nn.Dropout",
"torch.nn.functional.log_softmax",
"torch.nn.functional.dropout",
"torch.cat",
"torch.load",
"torch.nn.functional.nll_loss",
"torch.nn.Linear",
"torch.nn.functional.relu",
"torch.no_grad",
"torch.cuda.is_available",
"torch.nn.ReLU",
"torch.optim.lr_scheduler.StepLR"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
mhkarazeybek/Python
|
[
"8aa00ac15f9e198db421bbc05c37410b8d844632"
] |
[
"OpenCV/opticalFlow.py"
] |
[
"#mouse ile herhangi bir objenin üzerine tıklayıp hareket ettirebilirsiniz\nimport cv2\nimport numpy as np\n\ncap = cv2.VideoCapture(0)\n\n#Create old frame\n_, frame = cap.read()\noldGray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n# Lucas kanade params\nlkParams = dict(winSize=(15, 15),\n maxLevel=4,\n criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))\n\n# Mouse function\ndef selectPoint(event, x, y, flags, params):\n global point, pointSelected, oldPoints\n if event == cv2.EVENT_LBUTTONDOWN:\n point = (x, y)\n pointSelected = True\n oldPoints = np.array([[x, y]], dtype=np.float32)\n\ncv2.namedWindow(\"Frame\")\ncv2.setMouseCallback(\"Frame\", selectPoint)\n\npointSelected = False\npoint = ()\noldPoints = np.array([[]])\n\nwhile True:\n _, frame = cap.read()\n grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n if pointSelected is True:\n cv2.circle(frame, point, 5, (0, 0, 255), 2)\n\n newPoints, status, error = cv2.calcOpticalFlowPyrLK(oldGray, grayFrame, oldPoints, None, **lkParams)\n oldGray = grayFrame.copy()\n oldPoints = newPoints\n\n x, y = newPoints.ravel()\n cv2.circle(frame, (x, y), 5, (0, 255, 0), -1)\n\n\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1)\n if key == 27:\n break\n\ncap.release()\ncv2.destroyAllWindows()\n"
] |
[
[
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
pepelawycliffe/keras
|
[
"0f8da5a7b814cb37baba868fc11fe8b10b3d4cf8"
] |
[
"keras/layers/preprocessing/index_lookup_test.py"
] |
[
"# Copyright 2020 The TensorFlow Authors. All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\"\"\"Tests for Keras text vectorization preprocessing layer.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport tensorflow.compat.v2 as tf\n\nimport itertools\nimport os\nimport random\nimport string\n\nfrom absl.testing import parameterized\nimport numpy as np\n\nimport keras\nfrom keras import keras_parameterized\nfrom keras import testing_utils\nfrom keras.layers.preprocessing import index_lookup\nfrom keras.layers.preprocessing import index_lookup_v1\nfrom keras.layers.preprocessing import preprocessing_test_utils\nfrom keras.saving import save\nfrom keras.utils.generic_utils import CustomObjectScope\n\n\ndef get_layer_class():\n if tf.executing_eagerly():\n return index_lookup.IndexLookup\n else:\n return index_lookup_v1.IndexLookup\n\n\ndef _get_end_to_end_test_cases():\n test_cases = (\n {\n \"testcase_name\":\n \"test_strings_soft_vocab_cap\",\n # Create an array where 'earth' is the most frequent term, followed by\n # 'wind', then 'and', then 'fire'. 
This ensures that the vocab\n # accumulator is sorting by frequency.\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[\"earth\"], [\"wind\"], [\"and\"], [\"fire\"], [\"fire\"],\n [\"and\"], [\"earth\"], [\"michigan\"]]),\n \"kwargs\": {\n \"max_tokens\": None,\n \"num_oov_indices\": 1,\n \"mask_token\": \"\",\n \"oov_token\": \"[OOV]\",\n \"dtype\": tf.string,\n },\n \"expected_output\": [[2], [3], [4], [5], [5], [4], [2], [1]],\n \"input_dtype\":\n tf.string\n },\n {\n \"testcase_name\":\n \"test_inverse_strings_soft_vocab_cap\",\n # Create an array where 'earth' is the most frequent term, followed by\n # 'wind', then 'and', then 'fire'. This ensures that the vocab\n # accumulator is sorting by frequency.\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[1], [2], [3], [4], [4], [3], [1], [5]]),\n \"kwargs\": {\n \"max_tokens\": None,\n \"num_oov_indices\": 1,\n \"mask_token\": \"\",\n \"oov_token\": \"[OOV]\",\n \"dtype\": tf.string,\n \"invert\": True\n },\n \"expected_output\":\n np.array([[b\"earth\"], [b\"wind\"], [b\"and\"], [b\"fire\"], [b\"fire\"],\n [b\"and\"], [b\"earth\"], [b\"[OOV]\"]]),\n \"input_dtype\":\n tf.int64\n },\n {\n \"testcase_name\":\n \"test_strings_with_special_tokens\",\n # Mask and oov values in the vocab data should be dropped, and mapped\n # to 0 and 1 respectively when calling the layer.\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"\"], [\"\"], [\"\"], [\"[OOV]\"], [\"[OOV]\"], [\"[OOV]\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[\"earth\"], [\"\"], [\"wind\"], [\"[OOV]\"], [\"and\"], [\"\"],\n [\"fire\"], [\"and\"], [\"[OOV]\"], [\"michigan\"]]),\n 
\"kwargs\": {\n \"max_tokens\": None,\n \"num_oov_indices\": 1,\n \"mask_token\": \"\",\n \"oov_token\": \"[OOV]\",\n \"dtype\": tf.string,\n },\n \"expected_output\": [[2], [0], [3], [1], [4], [0], [5], [4], [1], [1]],\n \"input_dtype\":\n tf.string\n },\n {\n \"testcase_name\":\n \"test_ints_soft_vocab_cap\",\n # Create an array where 1138 is the most frequent term, followed by\n # 1729, then 725, then 42. This ensures that the vocab accumulator\n # is sorting by frequency.\n \"vocab_data\":\n np.array([[42], [1138], [1138], [1138], [1138], [1729], [1729],\n [1729], [725], [725]],\n dtype=np.int64),\n \"input_data\":\n np.array([[1138], [1729], [725], [42], [42], [725], [1138], [4]],\n dtype=np.int64),\n \"kwargs\": {\n \"max_tokens\": None,\n \"num_oov_indices\": 1,\n \"mask_token\": 0,\n \"oov_token\": -1,\n \"dtype\": tf.int64,\n },\n \"expected_output\": [[2], [3], [4], [5], [5], [4], [2], [1]],\n \"input_dtype\":\n tf.int64\n },\n {\n \"testcase_name\":\n \"test_ints_with_special_tokens\",\n # Mask and oov values in the vocab data should be dropped, and mapped\n # to 0 and 1 respectively when calling the layer.\n \"vocab_data\":\n np.array([[42], [1138], [1138], [1138], [1138], [0], [0], [0],\n [-1], [-1], [-1], [1729], [1729], [1729], [725], [725]],\n dtype=np.int64),\n \"input_data\":\n np.array([[1138], [0], [1729], [-1], [725], [0], [42], [725],\n [-1], [4]],\n dtype=np.int64),\n \"kwargs\": {\n \"max_tokens\": None,\n \"num_oov_indices\": 1,\n \"mask_token\": 0,\n \"oov_token\": -1,\n \"dtype\": tf.int64,\n },\n \"expected_output\": [[2], [0], [3], [1], [4], [0], [5], [4], [1], [1]],\n \"input_dtype\":\n tf.int64\n },\n {\n \"testcase_name\":\n \"test_strings_hard_vocab_cap\",\n # Create an array where 'earth' is the most frequent term, followed by\n # 'wind', then 'and', then 'fire'. 
This ensures that the vocab\n # accumulator is sorting by frequency.\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[\"earth\"], [\"wind\"], [\"and\"], [\"fire\"], [\"fire\"],\n [\"and\"], [\"earth\"], [\"michigan\"]]),\n \"kwargs\": {\n \"max_tokens\": 5,\n \"num_oov_indices\": 1,\n \"mask_token\": \"\",\n \"oov_token\": \"[OOV]\",\n \"dtype\": tf.string,\n },\n \"expected_output\": [[2], [3], [4], [1], [1], [4], [2], [1]],\n \"input_dtype\":\n tf.string\n },\n {\n \"testcase_name\":\n \"test_inverse_strings_hard_vocab_cap\",\n # Create an array where 'earth' is the most frequent term, followed by\n # 'wind', then 'and', then 'fire'. This ensures that the vocab\n # accumulator is sorting by frequency.\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[1], [2], [3], [4], [4], [3], [1], [5]]),\n \"kwargs\": {\n \"max_tokens\": 5,\n \"num_oov_indices\": 1,\n \"mask_token\": \"\",\n \"oov_token\": \"[OOV]\",\n \"dtype\": tf.string,\n \"invert\": True\n },\n \"expected_output\":\n np.array([[b\"earth\"], [b\"wind\"], [b\"and\"], [b\"[OOV]\"], [b\"[OOV]\"],\n [b\"and\"], [b\"earth\"], [b\"[OOV]\"]]),\n \"input_dtype\":\n tf.int64\n },\n {\n \"testcase_name\":\n \"test_ints_hard_vocab_cap\",\n # Create an array where 1138 is the most frequent term, followed by\n # 1729, then 725, then 42. 
This ensures that the vocab accumulator\n # is sorting by frequency.\n \"vocab_data\":\n np.array([[42], [1138], [1138], [1138], [1138], [1729], [1729],\n [1729], [725], [725]],\n dtype=np.int64),\n \"input_data\":\n np.array([[1138], [1729], [725], [42], [42], [725], [1138], [4]],\n dtype=np.int64),\n \"kwargs\": {\n \"max_tokens\": 5,\n \"num_oov_indices\": 1,\n \"mask_token\": 0,\n \"oov_token\": -1,\n \"dtype\": tf.int64,\n },\n \"expected_output\": [[2], [3], [4], [1], [1], [4], [2], [1]],\n \"input_dtype\":\n tf.int64\n },\n {\n \"testcase_name\":\n \"test_ints_tf_idf_output\",\n \"vocab_data\":\n np.array([[42], [1138], [1138], [1138], [1138], [1729], [1729],\n [1729], [725], [725]]),\n \"input_data\":\n np.array([[1138], [1729], [725], [42], [42], [725], [1138], [4]]),\n \"kwargs\": {\n \"max_tokens\": 6,\n \"num_oov_indices\": 1,\n \"mask_token\": 0,\n \"oov_token\": -1,\n \"output_mode\": index_lookup.TFIDF,\n \"dtype\": tf.int64,\n },\n \"expected_output\": [[0, 0, 1.098612, 0, 0, 0],\n [0, 0, 0, 1.252763, 0, 0],\n [0, 0, 0, 0, 1.466337, 0],\n [0, 0, 0, 0, 0, 1.7917595],\n [0, 0, 0, 0, 0, 1.7917595],\n [0, 0, 0, 0, 1.4663371, 0],\n [0, 0, 1.098612, 0, 0, 0],\n [0, 1.402368, 0, 0, 0, 0]],\n \"input_dtype\":\n tf.int64\n },\n {\n \"testcase_name\":\n \"test_strings_tf_idf_output\",\n \"vocab_data\":\n np.array([[\"fire\"], [\"earth\"], [\"earth\"], [\"earth\"], [\"earth\"],\n [\"wind\"], [\"wind\"], [\"wind\"], [\"and\"], [\"and\"]]),\n \"input_data\":\n np.array([[\"earth\"], [\"wind\"], [\"and\"], [\"fire\"], [\"fire\"],\n [\"and\"], [\"earth\"], [\"michigan\"]]),\n \"kwargs\": {\n \"max_tokens\": 6,\n \"num_oov_indices\": 1,\n \"mask_token\": \"\",\n \"oov_token\": \"[OOV]\",\n \"output_mode\": index_lookup.TFIDF,\n \"dtype\": tf.string,\n },\n \"expected_output\": [[0, 0, 1.098612, 0, 0, 0],\n [0, 0, 0, 1.252763, 0, 0],\n [0, 0, 0, 0, 1.466337, 0],\n [0, 0, 0, 0, 0, 1.7917595],\n [0, 0, 0, 0, 0, 1.7917595],\n [0, 0, 0, 0, 1.4663371, 0],\n [0, 0, 
1.098612, 0, 0, 0],\n [0, 1.402368, 0, 0, 0, 0]],\n \"input_dtype\":\n tf.string\n },\n )\n\n crossed_test_cases = []\n # Cross above test cases with use_dataset in (True, False)\n for use_dataset in (True, False):\n for case in test_cases:\n case = case.copy()\n if use_dataset:\n case[\"testcase_name\"] = case[\"testcase_name\"] + \"_with_dataset\"\n case[\"use_dataset\"] = use_dataset\n crossed_test_cases.append(case)\n\n return crossed_test_cases\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupLayerTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n @parameterized.named_parameters(*_get_end_to_end_test_cases())\n def test_layer_end_to_end_with_adapt(self, vocab_data, input_data, kwargs,\n use_dataset, expected_output,\n input_dtype):\n cls = get_layer_class()\n if \"invert\" in kwargs and kwargs[\"invert\"]:\n expected_output_dtype = kwargs[\"dtype\"]\n elif \"output_mode\" in kwargs and kwargs[\"output_mode\"] != index_lookup.INT:\n expected_output_dtype = tf.float32\n else:\n expected_output_dtype = tf.int64\n\n input_shape = input_data.shape\n\n if use_dataset:\n # Keras APIs expect batched datasets.\n # TODO(rachelim): `model.predict` predicts the result on each\n # dataset batch separately, then tries to concatenate the results\n # together. When the results have different shapes on the non-concat\n # axis (which can happen in the output_mode = INT case for\n # IndexLookup), the concatenation fails. In real use cases, this may\n # not be an issue because users are likely to pipe the preprocessing layer\n # into other keras layers instead of predicting it directly. A workaround\n # for these unit tests is to have the dataset only contain one batch, so\n # no concatenation needs to happen with the result. 
For consistency with\n # numpy input, we should make `predict` join differently shaped results\n # together sensibly, with 0 padding.\n input_data = tf.data.Dataset.from_tensor_slices(input_data).batch(\n input_shape[0])\n vocab_data = tf.data.Dataset.from_tensor_slices(vocab_data).batch(\n input_shape[0])\n\n with CustomObjectScope({\"IndexLookup\": cls}):\n output_data = testing_utils.layer_test(\n cls,\n kwargs=kwargs,\n input_shape=input_shape,\n input_data=input_data,\n input_dtype=input_dtype,\n expected_output_dtype=expected_output_dtype,\n validate_training=False,\n adapt_data=vocab_data)\n if \"invert\" in kwargs and kwargs[\"invert\"]:\n self.assertAllEqual(expected_output, output_data)\n else:\n self.assertAllClose(expected_output, output_data)\n\n\n@keras_parameterized.run_all_keras_modes\nclass CategoricalEncodingInputTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_sparse_string_input(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = tf.SparseTensor(\n indices=[[0, 0], [1, 2]],\n values=[\"fire\", \"michigan\"],\n dense_shape=[3, 4])\n\n expected_indices = [[0, 0], [1, 2]]\n expected_values = [5, 1]\n expected_dense_shape = [3, 4]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string, sparse=True)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = model.predict(input_array, steps=1)\n self.assertAllEqual(expected_indices, output_data.indices)\n self.assertAllEqual(expected_values, output_data.values)\n self.assertAllEqual(expected_dense_shape, output_data.dense_shape)\n\n def test_sparse_int_input(self):\n vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)\n input_array = tf.SparseTensor(\n indices=[[0, 0], [1, 2]],\n values=np.array([13, 32], 
dtype=np.int64),\n dense_shape=[3, 4])\n\n expected_indices = [[0, 0], [1, 2]]\n expected_values = [5, 1]\n expected_dense_shape = [3, 4]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)\n layer = get_layer_class()(\n max_tokens=None,\n dtype=tf.int64,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = model.predict(input_array, steps=1)\n self.assertAllEqual(expected_indices, output_data.indices)\n self.assertAllEqual(expected_values, output_data.values)\n self.assertAllEqual(expected_dense_shape, output_data.dense_shape)\n\n def test_ragged_string_input(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = tf.ragged.constant(\n [[\"earth\", \"wind\", \"fire\"], [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string, ragged=True)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_ragged_int_input(self):\n vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)\n input_array = tf.ragged.constant([[10, 11, 13], [13, 12, 10, 42]],\n dtype=np.int64)\n expected_output = [[2, 3, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64, ragged=True)\n layer = get_layer_class()(\n max_tokens=None,\n dtype=tf.int64,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n 
self.assertAllEqual(expected_output, output_dataset)\n\n def test_int32_input_with_int64_keys(self):\n vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)\n input_array = tf.ragged.constant([[10, 11, 13], [13, 12, 10, 42]],\n dtype=np.int32)\n expected_output = [[2, 3, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int32, ragged=True)\n layer = get_layer_class()(\n max_tokens=None,\n dtype=tf.int64,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n\n@keras_parameterized.run_all_keras_modes\nclass CategoricalEncodingMultiOOVTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_sparse_string_input_multi_bucket(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = tf.SparseTensor(\n indices=[[0, 0], [1, 2]],\n values=[\"fire\", \"ohio\"],\n dense_shape=[3, 4])\n\n expected_indices = [[0, 0], [1, 2]]\n expected_values = [6, 2]\n expected_dense_shape = [3, 4]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string, sparse=True)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=2,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = model.predict(input_array, steps=1)\n self.assertAllEqual(expected_indices, output_data.indices)\n self.assertAllEqual(expected_values, output_data.values)\n self.assertAllEqual(expected_dense_shape, output_data.dense_shape)\n\n def test_sparse_int_input_multi_bucket(self):\n vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)\n input_array = tf.SparseTensor(\n indices=[[0, 0], [1, 2]],\n values=np.array([13, 133], dtype=np.int64),\n 
dense_shape=[3, 4])\n\n expected_indices = [[0, 0], [1, 2]]\n expected_values = [6, 2]\n expected_dense_shape = [3, 4]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)\n layer = get_layer_class()(\n max_tokens=None,\n dtype=tf.int64,\n num_oov_indices=2,\n mask_token=0,\n oov_token=-1)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = model.predict(input_array, steps=1)\n self.assertAllEqual(expected_indices, output_data.indices)\n self.assertAllEqual(expected_values, output_data.values)\n self.assertAllEqual(expected_dense_shape, output_data.dense_shape)\n\n def test_ragged_string_input_multi_bucket(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = tf.ragged.constant(\n [[\"earth\", \"wind\", \"fire\"], [\"fire\", \"and\", \"earth\", \"ohio\"]])\n expected_output = [[3, 4, 6], [6, 5, 3, 2]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string, ragged=True)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=2,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_ragged_int_input_multi_bucket(self):\n vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)\n input_array = tf.ragged.constant([[10, 11, 13], [13, 12, 10, 133]],\n dtype=np.int64)\n expected_output = [[3, 4, 6], [6, 5, 3, 2]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64, ragged=True)\n layer = get_layer_class()(\n max_tokens=None,\n dtype=tf.int64,\n num_oov_indices=2,\n mask_token=0,\n oov_token=-1)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n 
self.assertAllEqual(expected_output, output_dataset)\n\n\n@keras_parameterized.run_all_keras_modes\nclass CategoricalEncodingAdaptTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_sparse_adapt(self):\n vocab_data = tf.SparseTensor(\n indices=[[0, 0], [0, 1], [1, 2]],\n values=[\"michigan\", \"fire\", \"michigan\"],\n dense_shape=[3, 4])\n vocab_dataset = tf.data.Dataset.from_tensors(vocab_data)\n\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.adapt(vocab_dataset)\n expected_vocabulary = [\"\", \"[OOV]\", \"michigan\", \"fire\"]\n self.assertAllEqual(expected_vocabulary, layer.get_vocabulary())\n\n def test_ragged_adapt(self):\n vocab_data = tf.ragged.constant([[\"michigan\"],\n [\"fire\", \"michigan\"]])\n vocab_dataset = tf.data.Dataset.from_tensors(vocab_data)\n\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.adapt(vocab_dataset)\n expected_vocabulary = [\"\", \"[OOV]\", \"michigan\", \"fire\"]\n self.assertAllEqual(expected_vocabulary, layer.get_vocabulary())\n\n def test_sparse_int_input(self):\n vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)\n input_array = tf.SparseTensor(\n indices=[[0, 0], [1, 2]],\n values=np.array([13, 32], dtype=np.int64),\n dense_shape=[3, 4])\n\n expected_indices = [[0, 0], [1, 2]]\n expected_values = [5, 1]\n expected_dense_shape = [3, 4]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64, sparse=True)\n layer = get_layer_class()(\n max_tokens=None,\n dtype=tf.int64,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_data = model.predict(input_array, steps=1)\n self.assertAllEqual(expected_indices, output_data.indices)\n 
self.assertAllEqual(expected_values, output_data.values)\n self.assertAllEqual(expected_dense_shape, output_data.dense_shape)\n\n def test_ragged_string_input(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = tf.ragged.constant(\n [[\"earth\", \"wind\", \"fire\"], [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string, ragged=True)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_ragged_int_input(self):\n vocab_data = np.array([10, 11, 12, 13], dtype=np.int64)\n input_array = tf.ragged.constant([[10, 11, 13], [13, 12, 10, 42]],\n dtype=np.int64)\n expected_output = [[2, 3, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64, ragged=True)\n layer = get_layer_class()(\n max_tokens=None,\n dtype=tf.int64,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_single_string_generator_dataset(self):\n\n def word_gen():\n for _ in itertools.count(1):\n yield \"\".join(random.choice(string.ascii_letters) for i in range(2))\n\n ds = tf.data.Dataset.from_generator(word_gen, tf.string,\n tf.TensorShape([]))\n batched_ds = ds.take(2)\n input_t = keras.Input(shape=(), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=10,\n num_oov_indices=0,\n mask_token=None,\n oov_token=None,\n dtype=tf.string)\n _ = layer(input_t)\n 
layer.adapt(batched_ds)\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupOutputTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def _write_to_temp_file(self, file_name, vocab_list):\n vocab_path = os.path.join(self.get_temp_dir(), file_name + \".txt\")\n with tf.io.gfile.GFile(vocab_path, \"w\") as writer:\n for vocab in vocab_list:\n writer.write(vocab + \"\\n\")\n writer.flush()\n writer.close()\n return vocab_path\n\n def test_int_output(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_int_output_shape(self):\n input_data = keras.Input(batch_size=16, shape=(4,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=2,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n int_data = layer(input_data)\n self.assertAllEqual(int_data.shape.as_list(), [16, 4])\n\n def test_int_output_no_reserved_zero(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[1, 2, 3, 4], [4, 3, 1, 0]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=None,\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n 
model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_int_output_explicit_vocab(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_binary_output_hard_maximum(self):\n \"\"\"Check binary output when pad_to_max_tokens=True.\"\"\"\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\", \"\"],\n [\"fire\", \"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [\n [1, 0, 1, 1, 1, 1, 0],\n [0, 1, 1, 0, 1, 1, 0],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=7,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.BINARY,\n pad_to_max_tokens=True,\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n binary_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=binary_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_binary_output_soft_maximum(self):\n \"\"\"Check binary output when pad_to_max_tokens=False.\"\"\"\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [\n 
[0, 0, 1, 1, 1, 1],\n [0, 1, 1, 0, 1, 1],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.BINARY,\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n binary_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=binary_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_binary_output_shape(self):\n input_data = keras.Input(batch_size=16, shape=(4,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=2,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.BINARY,\n dtype=tf.string)\n binary_data = layer(input_data)\n self.assertAllEqual(binary_data.shape.as_list(), [16, 2])\n\n def test_count_output_hard_maxiumum(self):\n \"\"\"Check count output when pad_to_max_tokens=True.\"\"\"\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"wind\"],\n [\"fire\", \"fire\", \"fire\", \"michigan\"]])\n expected_output = [\n [0, 0, 1, 2, 1, 0, 0],\n [0, 1, 0, 0, 0, 3, 0],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=7,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.COUNT,\n pad_to_max_tokens=True,\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n count_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=count_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_count_output_soft_maximum(self):\n \"\"\"Check count output when pad_to_max_tokens=False.\"\"\"\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"wind\"],\n [\"fire\", \"fire\", \"fire\", \"michigan\"]])\n 
expected_output = [\n [0, 0, 1, 2, 1, 0],\n [0, 1, 0, 0, 0, 3],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.COUNT,\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n count_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=count_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_count_output_shape(self):\n input_data = keras.Input(batch_size=16, shape=(4,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=2,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.COUNT,\n dtype=tf.string)\n count_data = layer(input_data)\n self.assertAllEqual(count_data.shape.as_list(), [16, 2])\n\n def test_ifidf_output_hard_maximum(self):\n \"\"\"Check tf-idf output when pad_to_max_tokens=True.\"\"\"\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n # OOV idf weight (bucket 0) should 0.5, the average of passed weights.\n idf_weights = [.4, .25, .75, .6]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"earth\"],\n [\"ohio\", \"fire\", \"earth\", \"michigan\"]])\n expected_output = [\n [0.00, 0.00, 0.80, 0.25, 0.75, 0.00, 0.00],\n [0.00, 1.00, 0.40, 0.00, 0.00, 0.60, 0.00],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=7,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.TFIDF,\n pad_to_max_tokens=True,\n dtype=tf.string)\n layer.set_vocabulary(vocab_data, idf_weights=idf_weights)\n layer_output = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=layer_output)\n output_dataset = model.predict(input_array)\n self.assertAllClose(expected_output, output_dataset)\n\n def test_ifidf_output_soft_maximum(self):\n \"\"\"Check tf-idf output when 
pad_to_max_tokens=False.\"\"\"\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n # OOV idf weight (bucket 0) should 0.5, the average of passed weights.\n idf_weights = [.4, .25, .75, .6]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"earth\"],\n [\"ohio\", \"fire\", \"earth\", \"michigan\"]])\n expected_output = [\n [0.00, 0.00, 0.80, 0.25, 0.75, 0.00],\n [0.00, 1.00, 0.40, 0.00, 0.00, 0.60],\n ]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.TFIDF,\n dtype=tf.string)\n layer.set_vocabulary(vocab_data, idf_weights=idf_weights)\n layer_output = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=layer_output)\n output_dataset = model.predict(input_array)\n self.assertAllClose(expected_output, output_dataset)\n\n def test_ifidf_output_shape(self):\n input_data = keras.Input(batch_size=16, shape=(4,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=2,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.COUNT,\n dtype=tf.string)\n layer_output = layer(input_data)\n self.assertAllEqual(layer_output.shape.as_list(), [16, 2])\n\n def test_int_output_file_vocab(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 0, 2, 1]]\n\n vocab_file = self._write_to_temp_file(\"temp\", vocab_data)\n vocabulary_initializer = tf.lookup.TextFileInitializer(\n filename=vocab_file,\n key_dtype=tf.string,\n key_index=tf.lookup.TextFileIndex.WHOLE_LINE,\n value_dtype=tf.int64,\n value_index=tf.lookup.TextFileIndex.LINE_NUMBER,\n value_index_offset=2)\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n vocabulary=vocabulary_initializer,\n max_tokens=None,\n 
num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_int_output_int_file_vocab(self):\n vocab_data = [\"10\", \"20\", \"30\", \"40\"]\n input_array = np.array([[10, 20, 30, 40], [40, 0, 10, 42]])\n expected_output = [[2, 3, 4, 5], [5, 0, 2, 1]]\n\n vocab_file = self._write_to_temp_file(\"temp\", vocab_data)\n vocabulary_initializer = tf.lookup.TextFileInitializer(\n filename=vocab_file,\n key_dtype=tf.int64,\n key_index=tf.lookup.TextFileIndex.WHOLE_LINE,\n value_dtype=tf.int64,\n value_index=tf.lookup.TextFileIndex.LINE_NUMBER,\n value_index_offset=2)\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64)\n layer = get_layer_class()(\n vocabulary=vocabulary_initializer,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupVocabularyTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest\n ):\n\n def test_int_output_explicit_vocab(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n 
self.assertAllEqual(expected_output, output_dataset)\n\n def test_int_output_explicit_vocab_with_special_tokens(self):\n vocab_data = [\"\", \"[OOV]\", \"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_vocab_with_max_cap(self):\n vocab_data = [\"\", \"[OOV]\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=5,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n returned_vocab = layer.get_vocabulary()\n self.assertAllEqual(vocab_data, returned_vocab)\n\n def test_int_vocab_with_max_cap(self):\n vocab_data = [0, -1, 42, 1276, 1138]\n layer = get_layer_class()(\n max_tokens=5,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n layer.set_vocabulary(vocab_data)\n returned_vocab = layer.get_vocabulary()\n self.assertAllEqual(vocab_data, returned_vocab)\n\n def test_non_unique_vocab_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\", \"fire\"]\n with self.assertRaisesRegex(ValueError, \".*repeated term.*fire.*\"):\n _ = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n\n def test_vocab_with_oov_and_wrong_mask_fails(self):\n vocab_data = [\"custom_mask\", \"[OOV]\", \"earth\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n 
mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError, \".*does not have the mask token.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_with_oov_and_no_mask_fails(self):\n vocab_data = [\"[OOV]\", \"earth\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError, \".*Reserved OOV.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_with_mask_but_no_oov_fails(self):\n vocab_data = [\"\", \"earth\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError, \".*does not have the OOV token.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_with_repeated_element_fails(self):\n vocab_data = [\"earth\", \"earth\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError, \".*repeated term.*earth.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_with_reserved_oov_element_fails(self):\n vocab_data = [\"earth\", \"test\", \"[OOV]\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError, \".*Reserved OOV.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_with_reserved_mask_element_fails(self):\n vocab_data = [\"earth\", \"mask_token\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"mask_token\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError, \".*Reserved mask.*\"):\n layer.set_vocabulary(vocab_data)\n\n def 
test_vocab_set_after_call_pad_to_max_false_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n pad_to_max_tokens=False,\n output_mode=index_lookup.BINARY,\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n # Calling the layer should lock the vocabulary.\n _ = layer([[\"earth\"]])\n with self.assertRaisesRegex(RuntimeError, \"vocabulary cannot be changed\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_with_idf_weights_non_tfidf_output_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n weight_data = [1, 1, 1, 1, 1]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.BINARY,\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError,\n \".*idf_weights should only be set if.*\"):\n layer.set_vocabulary(vocab_data, idf_weights=weight_data)\n\n def test_vocab_with_idf_weights_length_mismatch_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n weight_data = [1, 1, 1, 1, 1] # too long\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.TFIDF,\n dtype=tf.string)\n with self.assertRaisesRegex(\n ValueError, \"idf_weights must be the same length as vocab.*\"):\n layer.set_vocabulary(vocab_data, idf_weights=weight_data)\n\n def test_vocab_without_idf_weights_tfidf_output_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n output_mode=index_lookup.TFIDF,\n dtype=tf.string)\n with self.assertRaisesRegex(\n ValueError, \"idf_weights must be set if output_mode is TFIDF\"):\n layer.set_vocabulary(vocab_data)\n\n def test_non_unique_int_vocab_fails(self):\n vocab_data = [12, 13, 14, 15, 15]\n with 
self.assertRaisesRegex(ValueError, \".*repeated term.*15.*\"):\n _ = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n\n def test_int_vocab_with_oov_and_wrong_mask_fails(self):\n vocab_data = [1234, -1, 11, 21, 13, 14]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n with self.assertRaisesRegex(ValueError, \".*does not have the mask token.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_int_vocab_with_oov_and_no_mask_fails(self):\n vocab_data = [-1, 11, 12, 13, 14]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n with self.assertRaisesRegex(ValueError, \".*Reserved OOV.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_int_vocab_with_mask_but_no_oov_fails(self):\n vocab_data = [0, 11, 12, 13, 14]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n with self.assertRaisesRegex(ValueError, \".*does not have the OOV token.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_int_vocab_with_repeated_element_fails(self):\n vocab_data = [11, 11, 34, 23, 124]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n with self.assertRaisesRegex(ValueError, \".*repeated term.*11.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_int_vocab_with_reserved_oov_element_fails(self):\n vocab_data = [14, 38, -1, 34, 3, 84]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n with self.assertRaisesRegex(ValueError, \".*Reserved OOV.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_int_vocab_with_reserved_mask_element_fails(self):\n vocab_data = [125, 0, 3, 4, 94]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n 
mask_token=0,\n oov_token=-1,\n dtype=tf.int64)\n with self.assertRaisesRegex(ValueError, \".*Reserved mask.*\"):\n layer.set_vocabulary(vocab_data)\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupInverseVocabularyTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_int_output_explicit_vocab(self):\n vocab_data = [\"[OOV]\", \"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[2, 3, 4, 5], [5, 4, 2, 1]])\n expected_output = np.array([[\"earth\", \"wind\", \"and\", \"fire\"],\n [\"fire\", \"and\", \"earth\", \"[OOV]\"]])\n\n input_data = keras.Input(shape=(None,), dtype=tf.int64)\n layer = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string,\n invert=True)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(expected_output, output_dataset)\n\n def test_vocab_with_max_cap(self):\n vocab_data = [\"\", \"[OOV]\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=5,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string,\n invert=True)\n layer.set_vocabulary(vocab_data)\n returned_vocab = layer.get_vocabulary()\n self.assertAllEqual(vocab_data, returned_vocab)\n\n def test_int_vocab_with_max_cap(self):\n vocab_data = [0, -1, 42, 1276, 1138]\n layer = get_layer_class()(\n max_tokens=5,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64,\n invert=True)\n layer.set_vocabulary(vocab_data)\n returned_vocab = layer.get_vocabulary()\n self.assertAllEqual(vocab_data, returned_vocab)\n\n def test_non_unique_vocab_fails(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\", \"fire\"]\n with self.assertRaisesRegex(ValueError, \".*repeated term.*fire.*\"):\n _ = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n 
num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string,\n invert=True)\n\n def test_vocab_with_repeated_element_fails(self):\n vocab_data = [\"earth\", \"earth\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string,\n invert=True)\n with self.assertRaisesRegex(ValueError, \".*repeated term.*earth.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_vocab_with_reserved_mask_element_fails(self):\n vocab_data = [\"earth\", \"mask_token\", \"wind\", \"and\", \"fire\"]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"mask_token\",\n oov_token=\"[OOV]\",\n dtype=tf.string,\n invert=True)\n with self.assertRaisesRegex(ValueError, \".*Reserved mask.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_non_unique_int_vocab_fails(self):\n vocab_data = [12, 13, 14, 15, 15]\n with self.assertRaisesRegex(ValueError, \".*repeated term.*15.*\"):\n _ = get_layer_class()(\n vocabulary=vocab_data,\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64,\n invert=True)\n\n def test_int_vocab_with_repeated_element_fails(self):\n vocab_data = [11, 11, 34, 23, 124]\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=0,\n oov_token=-1,\n dtype=tf.int64,\n invert=True)\n with self.assertRaisesRegex(ValueError, \".*repeated term.*11.*\"):\n layer.set_vocabulary(vocab_data)\n\n\n@keras_parameterized.run_all_keras_modes(always_skip_eager=True)\nclass IndexLookupSaveableTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_ops_are_not_added_with_multiple_get_set_weights(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=10,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n 
layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n weights = model.get_weights()\n model.set_weights(weights)\n keras.backend.get_session().graph.finalize()\n weights = model.get_weights()\n model.set_weights(weights)\n\n def test_layer_saving_with_h5(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=10,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n path = os.path.join(self.get_temp_dir(), \"model\")\n with self.assertRaisesRegex(NotImplementedError,\n \"Save or restore weights that is not.*\"):\n save.save_model(model, path, save_format=\"h5\")\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupErrorTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_too_long_vocab_fails_in_single_setting(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n\n layer = get_layer_class()(\n max_tokens=4,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n with self.assertRaisesRegex(ValueError,\n \"vocabulary larger than the maximum vocab.*\"):\n layer.set_vocabulary(vocab_data)\n\n def test_zero_max_tokens_fails(self):\n with self.assertRaisesRegex(ValueError, \".*max_tokens.*\"):\n _ = get_layer_class()(\n max_tokens=0,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupSavingTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def test_vocabulary_persistence_across_saving(self):\n vocab_data = [\"earth\", \"wind\", \"and\", \"fire\"]\n input_array = np.array([[\"earth\", \"wind\", \"and\", 
\"fire\"],\n [\"fire\", \"and\", \"earth\", \"michigan\"]])\n expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]\n\n # Build and validate a golden model.\n input_data = keras.Input(shape=(None,), dtype=tf.string)\n layer = get_layer_class()(\n max_tokens=None,\n num_oov_indices=1,\n mask_token=\"\",\n oov_token=\"[OOV]\",\n dtype=tf.string)\n layer.set_vocabulary(vocab_data)\n int_data = layer(input_data)\n model = keras.Model(inputs=input_data, outputs=int_data)\n output_dataset = model.predict(input_array)\n self.assertAllEqual(output_dataset, expected_output)\n\n # Save the model to disk.\n output_path = os.path.join(self.get_temp_dir(), \"tf_keras_saved_model\")\n model.save(output_path, save_format=\"tf\")\n\n # Delete the session and graph to ensure that the loaded model is generated\n # from scratch.\n # TODO(b/149526183): Can't clear session when TF2 is disabled.\n if tf.__internal__.tf2.enabled():\n keras.backend.clear_session()\n\n loaded_model = keras.models.load_model(\n output_path, custom_objects={\"IndexLookup\": get_layer_class()})\n\n # Ensure that the loaded model is unique (so that the save/load is real)\n self.assertIsNot(model, loaded_model)\n\n # Validate correctness of the new model.\n new_output_dataset = loaded_model.predict(input_array)\n self.assertAllEqual(new_output_dataset, expected_output)\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupStringCombinerTest(\n keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest):\n\n def compare_text_accumulators(self, a, b, msg=None):\n if a is None or b is None:\n self.assertAllEqual(a, b, msg=msg)\n\n self.assertAllEqual(a.count_dict, b.count_dict, msg=msg)\n\n compare_accumulators = compare_text_accumulators\n\n def update_accumulator(self, accumulator, data):\n accumulator.count_dict.update(dict(zip(data[\"vocab\"], data[\"counts\"])))\n\n return accumulator\n\n def test_combiner_api_compatibility_int_mode(self):\n data = np.array([[\"earth\", \"wind\", 
\"and\", \"fire\"],\n [\"earth\", \"wind\", \"and\", \"michigan\"]])\n combiner = index_lookup._IndexLookupCombiner()\n expected_accumulator_output = {\n \"vocab\": np.array([\"and\", \"earth\", \"wind\", \"fire\", \"michigan\"]),\n \"counts\": np.array([2, 2, 2, 1, 1]),\n }\n expected_extract_output = {\n \"vocab\": np.array([\"wind\", \"earth\", \"and\", \"michigan\", \"fire\"]),\n \"idf_weights\": None,\n }\n expected_accumulator = combiner._create_accumulator()\n expected_accumulator = self.update_accumulator(expected_accumulator,\n expected_accumulator_output)\n self.validate_accumulator_serialize_and_deserialize(combiner, data,\n expected_accumulator)\n self.validate_accumulator_uniqueness(combiner, data)\n self.validate_accumulator_extract(combiner, data, expected_extract_output)\n\n # TODO(askerryryan): Add tests confirming equivalence to behavior of\n # existing tf.keras.preprocessing.text.Tokenizer.\n @parameterized.named_parameters(\n {\n \"testcase_name\":\n \"top_k_smaller_than_full_vocab\",\n \"data\":\n np.array([[\"earth\", \"wind\"], [\"fire\", \"wind\"], [\"and\"],\n [\"fire\", \"wind\"]]),\n \"vocab_size\":\n 3,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\", \"and\"]),\n \"counts\": np.array([3, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\"]),\n \"idf_weights\": None,\n },\n },\n {\n \"testcase_name\":\n \"top_k_larger_than_full_vocab\",\n \"data\":\n np.array([[\"earth\", \"wind\"], [\"fire\", \"wind\"], [\"and\"],\n [\"fire\", \"wind\"]]),\n \"vocab_size\":\n 10,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\", \"and\"]),\n \"counts\": np.array([3, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\", \"and\"]),\n \"idf_weights\": None,\n },\n },\n {\n \"testcase_name\":\n \"no_top_k\",\n \"data\":\n np.array([[\"earth\", \"wind\"], [\"fire\", \"wind\"], 
[\"and\"],\n [\"fire\", \"wind\"]]),\n \"vocab_size\":\n None,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\", \"and\"]),\n \"counts\": np.array([3, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\", \"and\"]),\n \"idf_weights\": None,\n },\n },\n {\n \"testcase_name\": \"single_element_per_row\",\n \"data\": np.array([[\"earth\"], [\"wind\"], [\"fire\"], [\"wind\"], [\"and\"]]),\n \"vocab_size\": 3,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([\"wind\", \"and\", \"earth\", \"fire\"]),\n \"counts\": np.array([2, 1, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\"]),\n \"idf_weights\": None,\n },\n },\n # Which tokens are retained are based on global frequency, and thus are\n # sensitive to frequency within a document. In contrast, because idf only\n # considers the presence of a token in a document, it is insensitive\n # to the frequency of the token within the document.\n {\n \"testcase_name\":\n \"retained_tokens_sensitive_to_within_document_frequency\",\n \"data\":\n np.array([[\"earth\", \"earth\"], [\"wind\", \"wind\"], [\"fire\", \"fire\"],\n [\"wind\", \"wind\"], [\"and\", \"michigan\"]]),\n \"vocab_size\":\n 3,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([\"wind\", \"earth\", \"fire\", \"and\", \"michigan\"]),\n \"counts\": np.array([4, 2, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([\"wind\", \"fire\", \"earth\"]),\n \"idf_weights\": None,\n },\n })\n def test_combiner_computation(self, data, vocab_size,\n expected_accumulator_output,\n expected_extract_output):\n combiner = index_lookup._IndexLookupCombiner(vocab_size=vocab_size)\n expected_accumulator = combiner._create_accumulator()\n expected_accumulator = self.update_accumulator(expected_accumulator,\n expected_accumulator_output)\n self.validate_accumulator_computation(combiner, data, 
expected_accumulator)\n self.validate_accumulator_extract(combiner, data, expected_extract_output)\n\n\n@keras_parameterized.run_all_keras_modes\nclass IndexLookupIntCombinerTest(keras_parameterized.TestCase,\n preprocessing_test_utils.PreprocessingLayerTest\n ):\n\n def compare_text_accumulators(self, a, b, msg=None):\n if a is None or b is None:\n self.assertAllEqual(a, b, msg=msg)\n\n self.assertAllEqual(a.count_dict, b.count_dict, msg=msg)\n\n compare_accumulators = compare_text_accumulators\n\n def update_accumulator(self, accumulator, data):\n accumulator.count_dict.update(dict(zip(data[\"vocab\"], data[\"counts\"])))\n\n return accumulator\n\n def test_combiner_api_compatibility_int_mode(self):\n data = np.array([[42, 1138, 725, 1729], [42, 1138, 725, 203]])\n combiner = index_lookup._IndexLookupCombiner()\n expected_accumulator_output = {\n \"vocab\": np.array([1138, 725, 42, 1729, 203]),\n \"counts\": np.array([2, 2, 2, 1, 1]),\n }\n expected_extract_output = {\n \"vocab\": np.array([1138, 725, 42, 1729, 203]),\n \"idf_weights\": None,\n }\n expected_accumulator = combiner._create_accumulator()\n expected_accumulator = self.update_accumulator(expected_accumulator,\n expected_accumulator_output)\n self.validate_accumulator_serialize_and_deserialize(combiner, data,\n expected_accumulator)\n self.validate_accumulator_uniqueness(combiner, data)\n self.validate_accumulator_extract(combiner, data, expected_extract_output)\n\n # TODO(askerryryan): Add tests confirming equivalence to behavior of\n # existing tf.keras.preprocessing.text.Tokenizer.\n @parameterized.named_parameters(\n {\n \"testcase_name\": \"top_k_smaller_than_full_vocab\",\n \"data\": np.array([[42, 1138], [1729, 1138], [725], [1729, 1138]]),\n \"vocab_size\": 3,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([1138, 1729, 725, 42]),\n \"counts\": np.array([3, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([1138, 1729, 725]),\n \"idf_weights\": None,\n },\n },\n 
{\n \"testcase_name\": \"top_k_larger_than_full_vocab\",\n \"data\": np.array([[42, 1138], [1729, 1138], [725], [1729, 1138]]),\n \"vocab_size\": 10,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([1138, 1729, 725, 42]),\n \"counts\": np.array([3, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([1138, 1729, 725, 42]),\n \"idf_weights\": None,\n },\n },\n {\n \"testcase_name\": \"no_top_k\",\n \"data\": np.array([[42, 1138], [1729, 1138], [725], [1729, 1138]]),\n \"vocab_size\": None,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([1138, 1729, 725, 42]),\n \"counts\": np.array([3, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([1138, 1729, 725, 42]),\n \"idf_weights\": None,\n },\n },\n {\n \"testcase_name\": \"single_element_per_row\",\n \"data\": np.array([[42], [1138], [1729], [1138], [725]]),\n \"vocab_size\": 3,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([1138, 1729, 725, 42]),\n \"counts\": np.array([2, 1, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([1138, 1729, 725]),\n \"idf_weights\": None,\n },\n },\n # Which tokens are retained are based on global frequency, and thus are\n # sensitive to frequency within a document. 
In contrast, because idf only\n # considers the presence of a token in a document, it is insensitive\n # to the frequency of the token within the document.\n {\n \"testcase_name\":\n \"retained_tokens_sensitive_to_within_document_frequency\",\n \"data\":\n np.array([[42, 42], [1138, 1138], [1729, 1729], [1138, 1138],\n [725, 203]]),\n \"vocab_size\":\n 3,\n \"expected_accumulator_output\": {\n \"vocab\": np.array([1138, 42, 1729, 725, 203]),\n \"counts\": np.array([4, 2, 2, 1, 1]),\n },\n \"expected_extract_output\": {\n \"vocab\": np.array([1138, 1729, 42]),\n \"idf_weights\": None,\n },\n })\n def test_combiner_computation(self, data, vocab_size,\n expected_accumulator_output,\n expected_extract_output):\n combiner = index_lookup._IndexLookupCombiner(vocab_size=vocab_size)\n expected_accumulator = combiner._create_accumulator()\n expected_accumulator = self.update_accumulator(expected_accumulator,\n expected_accumulator_output)\n self.validate_accumulator_computation(combiner, data, expected_accumulator)\n self.validate_accumulator_extract(combiner, data, expected_extract_output)\n\n\nif __name__ == \"__main__\":\n tf.test.main()\n"
] |
[
[
"tensorflow.compat.v2.TensorShape",
"tensorflow.compat.v2.data.Dataset.from_tensor_slices",
"tensorflow.compat.v2.executing_eagerly",
"tensorflow.compat.v2.test.main",
"tensorflow.compat.v2.io.gfile.GFile",
"tensorflow.compat.v2.ragged.constant",
"tensorflow.compat.v2.lookup.TextFileInitializer",
"tensorflow.compat.v2.__internal__.tf2.enabled",
"tensorflow.compat.v2.SparseTensor",
"numpy.array",
"tensorflow.compat.v2.data.Dataset.from_tensors"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
HDembinski/mplhep
|
[
"5ae7601bd8922074dfc1ee92fc81f590a9efa7d5"
] |
[
"tests/test.py"
] |
[
"import os\nimport sys\nimport pytest\nimport matplotlib.pyplot as plt\nimport numpy as np\n\nos.environ[\"RUNNING_PYTEST\"] = \"true\"\n\nimport mplhep as hep # noqa\n\n\"\"\"\nTo test run:\npy.test --mpl\n\nWhen adding new tests, run:\npy.test --mpl-generate-path=tests/baseline\n\"\"\"\n\nplt.switch_backend(\"Agg\")\n\n\[email protected]_image_compare(style='default', remove_text=True)\ndef test_basic():\n fig, ax = plt.subplots(figsize=(10, 10))\n h = [1, 3, 2]\n bins = [0, 1, 2, 3]\n hep.histplot(h, bins, yerr=True, label='X')\n ax.legend()\n return fig\n\n\[email protected]_image_compare(style='default', remove_text=True)\ndef test_histplot():\n np.random.seed(0)\n h, bins = np.histogram(np.random.normal(10, 3, 400), bins=10)\n\n fig, axs = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(10, 10))\n axs = axs.flatten()\n\n axs[0].set_title(\"Default\", fontsize=18)\n hep.histplot(h, bins, ax=axs[0])\n\n axs[1].set_title(\"Plot Edges\", fontsize=18)\n hep.histplot(h, bins, edges=True, ax=axs[1])\n\n axs[2].set_title(\"Plot Errorbars\", fontsize=18)\n hep.histplot(h, bins, yerr=np.sqrt(h), ax=axs[2])\n\n axs[3].set_title(\"Filled Histogram\", fontsize=18)\n hep.histplot(h, bins, histtype='fill', ax=axs[3])\n\n fig.subplots_adjust(hspace=0.1, wspace=0.1)\n return fig\n\n\[email protected]_image_compare(style='default', remove_text=True)\ndef test_histplot_density():\n np.random.seed(0)\n h, bins = np.histogram(np.random.normal(10, 3, 400), bins=10)\n\n fig, axs = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(10, 10))\n axs = axs.flatten()\n\n axs[0].set_title(\"Default\", fontsize=18)\n hep.histplot(h, bins, ax=axs[0], density=True)\n\n axs[1].set_title(\"Plot Edges\", fontsize=18)\n hep.histplot(h, bins, edges=True, ax=axs[1], density=True)\n\n axs[2].set_title(\"Plot Errorbars\", fontsize=18)\n hep.histplot(h, bins, yerr=np.sqrt(h), ax=axs[2], density=True)\n\n axs[3].set_title(\"Filled Histogram\", fontsize=18)\n hep.histplot(h, bins, 
histtype='fill', ax=axs[3], density=True)\n\n fig.subplots_adjust(hspace=0.1, wspace=0.1)\n return fig\n\n\[email protected]_image_compare(style='default', remove_text=True)\ndef test_histplot_multiple():\n np.random.seed(0)\n h, bins = np.histogram(np.random.normal(10, 3, 400), bins=10)\n\n fig, axs = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(10, 10))\n axs = axs.flatten()\n\n axs[0].set_title(\"Default Overlay\", fontsize=18)\n hep.histplot([h, 1.5 * h], bins, ax=axs[0])\n\n axs[1].set_title(\"Default Overlay w/ Errorbars\", fontsize=18)\n hep.histplot([h, 1.5 * h], bins, yerr=[np.sqrt(h), np.sqrt(1.5 * h)], ax=axs[1])\n\n axs[2].set_title(\"Automatic Errorbars\", fontsize=18)\n hep.histplot([h, 1.5 * h], bins, yerr=True, ax=axs[2])\n\n axs[3].set_title(\"With Labels\", fontsize=18)\n hep.histplot([h, 1.5 * h], bins, yerr=True, ax=axs[3], label=[\"First\", \"Second\"])\n axs[3].legend(fontsize=16, prop={'family': 'Tex Gyre Heros'})\n\n fig.subplots_adjust(hspace=0.1, wspace=0.1)\n return fig\n\n\[email protected]_image_compare(style='default', remove_text=True)\ndef test_histplot_stack():\n np.random.seed(0)\n h, bins = np.histogram(np.random.normal(10, 3, 400), bins=10)\n\n fig, axs = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(10, 10))\n axs = axs.flatten()\n\n axs[0].set_title(\"Default\", fontsize=18)\n hep.histplot([h, 1.5 * h], bins, stack=True, ax=axs[0])\n\n axs[1].set_title(\"Plot Edges\", fontsize=18)\n hep.histplot([h, 1.5 * h], bins, edges=True, stack=True, ax=axs[1])\n\n axs[2].set_title(\"Plot Errorbars\", fontsize=18)\n hep.histplot([h, 1.5 * h], bins, yerr=np.sqrt(h), stack=True, ax=axs[2])\n\n axs[3].set_title(\"Filled Histogram\", fontsize=18)\n hep.histplot([1.5 * h, h], bins, histtype='fill', stack=True, ax=axs[3])\n\n fig.subplots_adjust(hspace=0.1, wspace=0.1)\n return fig\n\n\[email protected]_image_compare(style='default', remove_text=True)\ndef test_hist2dplot():\n np.random.seed(0)\n xedges = np.arange(0, 11.5, 
1.5)\n yedges = [0, 2, 3, 4, 6, 7]\n x = np.random.normal(5, 1.5, 100)\n y = np.random.normal(4, 1, 100)\n H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges))\n\n fig, ax = plt.subplots()\n hep.hist2dplot(H, xedges, yedges, labels=True)\n return fig\n\n\[email protected]_image_compare(style='default', remove_text=True)\ndef test_histplot_kwargs():\n np.random.seed(0)\n h, bins = np.histogram(np.random.normal(10, 3, 1000), bins=10)\n\n fig, axs = plt.subplots(2, 2, sharex=True, sharey=True, figsize=(10, 10))\n axs = axs.flatten()\n\n hep.histplot([h * 2, h * 1, h * 0.5], bins, label=[\"1\", \"2\", \"3\"], stack=True,\n histtype=\"step\", linestyle=\"--\",\n color=[\"green\", \"black\", (1, 0, 0, .4)],\n ax=axs[0])\n axs[0].legend()\n\n hep.histplot([h, h, h], bins, label=[\"1\", \"2\", \"3\"], stack=True,\n histtype=\"step\", linestyle=[\"--\", ':'],\n color=(1, 0, 0, .8),\n ax=axs[1])\n axs[1].legend()\n\n hep.histplot([h, h, h], bins, label=[\"1\", \"2\", \"3\"], histtype=\"step\",\n weights=[0.5 * np.ones_like(h), 3 * np.ones_like(h),\n 6 * np.ones_like(h)],\n linestyle=[\"--\", ':'],\n color=(1, 0, 0, .8),\n ax=axs[2])\n axs[2].legend()\n\n hep.histplot([h, h, h], bins, label=[\"1\", \"2\", \"3\"], histtype=\"fill\",\n weights=[0.5 * np.ones_like(h), 3 * np.ones_like(h),\n 6 * np.ones_like(h)],\n linestyle=[\"--\", ':'],\n color=[\"green\", \"darkorange\", 'red'],\n alpha=[0.4, 0.7, 0.2],\n ax=axs[3])\n axs[3].legend()\n\n fig.subplots_adjust(hspace=0.1, wspace=0.1)\n return fig\n\n\n# Compare styles\[email protected](sys.platform != \"linux\", reason=\"Linux only\")\[email protected]_image_compare(style='default', remove_text=False)\ndef test_style_atlas():\n import mplhep as hep\n import matplotlib.pyplot as plt\n plt.rcParams.update(plt.rcParamsDefault)\n\n # Test suite does not have Helvetica\n plt.style.use([hep.style.ATLAS, {\"font.sans-serif\": [\"Tex Gyre Heros\"]}])\n fig, ax = plt.subplots()\n hep.atlas.text()\n\n 
plt.rcParams.update(plt.rcParamsDefault)\n return fig\n\[email protected](sys.platform != \"linux\", reason=\"Linux only\")\[email protected]_image_compare(style='default', remove_text=False)\ndef test_style_cms():\n import mplhep as hep\n import matplotlib.pyplot as plt\n plt.rcParams.update(plt.rcParamsDefault)\n\n plt.style.use(hep.style.CMS)\n fig, ax = plt.subplots()\n hep.cms.text()\n\n plt.rcParams.update(plt.rcParamsDefault)\n return fig\n\n\[email protected](sys.platform != \"linux\", reason=\"Linux only\")\[email protected]_image_compare(style='default', remove_text=False)\ndef test_style_lhcb():\n import mplhep as hep\n import matplotlib.pyplot as plt\n plt.rcParams.update(plt.rcParamsDefault)\n\n plt.style.use([hep.style.LHCb, \n {\"figure.autolayout\": False}\n ])\n fig, ax = plt.subplots()\n # Doesn't work for now\n # hep.lhcb.text()\n plt.rcParams.update(plt.rcParamsDefault)\n return fig\n"
] |
[
[
"numpy.ones_like",
"numpy.sqrt",
"numpy.random.seed",
"matplotlib.pyplot.switch_backend",
"numpy.arange",
"matplotlib.pyplot.subplots",
"numpy.random.normal",
"matplotlib.pyplot.rcParams.update",
"matplotlib.pyplot.style.use",
"numpy.histogram2d"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sridhar0605/cnvkit
|
[
"f4ccc908bb3fb07ee701ace63f1f3702ef109f2d"
] |
[
"skgenome/gary.py"
] |
[
"\"\"\"Base class for an array of annotated genomic regions.\"\"\"\nimport logging\nfrom collections import OrderedDict\n\nimport numpy as np\nimport pandas as pd\n\nfrom .chromsort import sorter_chrom\nfrom .intersect import by_ranges, into_ranges, iter_ranges, iter_slices\nfrom .merge import flatten, merge\nfrom .rangelabel import to_label\nfrom .subtract import subtract\nfrom .subdivide import subdivide\n\n\nclass GenomicArray(object):\n \"\"\"An array of genomic intervals. Base class for genomic data structures.\n\n Can represent most BED-like tabular formats with arbitrary additional\n columns.\n \"\"\"\n _required_columns = (\"chromosome\", \"start\", \"end\")\n _required_dtypes = (str, int, int)\n\n def __init__(self, data_table, meta_dict=None):\n # Validation\n if (data_table is None or\n (isinstance(data_table, (list, tuple)) and not len(data_table)) or\n (isinstance(data_table, pd.DataFrame) and not len(data_table.columns))\n ):\n data_table = self._make_blank()\n else:\n if not isinstance(data_table, pd.DataFrame):\n # Rarely if ever needed -- prefer from_rows, from_columns, etc.\n data_table = pd.DataFrame(data_table)\n if not all(c in data_table.columns for c in self._required_columns):\n raise ValueError(\"data table must have at least columns %r; \"\n \"got %r\" % (self._required_columns,\n tuple(data_table.columns)))\n # Ensure columns are the right type\n # (in case they've been automatically converted to the wrong type,\n # e.g. 
chromosome names as integers; genome coordinates as floats)\n if len(data_table):\n def ok_dtype(col, dt):\n return isinstance(data_table[col].iat[0], dt)\n else:\n def ok_dtype(col, dt):\n return data_table[col].dtype == np.dtype(dt)\n recast_cols = {col: dtype\n for col, dtype in zip(self._required_columns, self._required_dtypes)\n if not ok_dtype(col, dtype)\n }\n if recast_cols:\n data_table = data_table.astype(recast_cols)\n\n self.data = data_table\n self.meta = (dict(meta_dict)\n if meta_dict is not None and len(meta_dict)\n else {})\n\n @classmethod\n def _make_blank(cls):\n \"\"\"Create an empty dataframe with the columns required by this class.\"\"\"\n spec = list(zip(cls._required_columns, cls._required_dtypes))\n try:\n arr = np.zeros(0, dtype=spec)\n return pd.DataFrame(arr)\n except TypeError as exc:\n raise TypeError(\"{}: {}\".format(exc, spec))\n\n @classmethod\n def from_columns(cls, columns, meta_dict=None):\n \"\"\"Create a new instance from column arrays, given as a dict.\"\"\"\n table = pd.DataFrame.from_dict(columns)\n ary = cls(table, meta_dict)\n ary.sort_columns()\n return ary\n\n @classmethod\n def from_rows(cls, rows, columns=None, meta_dict=None):\n \"\"\"Create a new instance from a list of rows, as tuples or arrays.\"\"\"\n if columns is None:\n columns = cls._required_columns\n table = pd.DataFrame.from_records(rows, columns=columns)\n return cls(table, meta_dict)\n\n def as_columns(self, **columns):\n \"\"\"Wrap the named columns in this instance's metadata.\"\"\"\n return self.__class__.from_columns(columns, self.meta)\n # return self.__class__(self.data.loc[:, columns], self.meta.copy())\n\n def as_dataframe(self, dframe, reset_index=False):\n \"\"\"Wrap the given pandas DataFrame in this instance's metadata.\"\"\"\n if reset_index:\n dframe = dframe.reset_index(drop=True)\n return self.__class__(dframe, self.meta.copy())\n\n def as_series(self, arraylike):\n return pd.Series(arraylike, index=self.data.index)\n\n def as_rows(self, 
rows):\n \"\"\"Wrap the given rows in this instance's metadata.\"\"\"\n try:\n out = self.from_rows(rows,\n columns=self.data.columns,\n meta_dict=self.meta)\n except AssertionError:\n columns = self.data.columns.tolist()\n firstrow = next(iter(rows))\n raise RuntimeError(\"Passed %d columns %r, but \"\n \"%d elements in first row: %s\",\n len(columns), columns, len(firstrow), firstrow)\n return out\n\n # Container behaviour\n\n def __bool__(self):\n return bool(len(self.data))\n\n def __eq__(self, other):\n return (isinstance(other, self.__class__) and\n self.data.equals(other.data))\n\n def __len__(self):\n return len(self.data)\n\n def __contains__(self, key):\n return key in self.data.columns\n\n def __getitem__(self, index):\n \"\"\"Access a portion of the data.\n\n Cases:\n\n - single integer: a row, as pd.Series\n - string row name: a column, as pd.Series\n - a boolean array: masked rows, as_dataframe\n - tuple of integers: selected rows, as_dataframe\n \"\"\"\n if isinstance(index, int):\n # A single row\n return self.data.iloc[index]\n # return self.as_dataframe(self.data.iloc[index:index+1])\n elif isinstance(index, str):\n # A column, by name\n return self.data[index]\n elif (isinstance(index, tuple) and\n len(index) == 2 and\n index[1] in self.data.columns):\n # Row index, column index -> cell value\n return self.data.loc[index]\n elif isinstance(index, slice):\n # return self.as_dataframe(self.data.take(index))\n return self.as_dataframe(self.data[index])\n else:\n # Iterable -- selected row indices or boolean array, probably\n try:\n if isinstance(index, type(None)) or len(index) == 0:\n empty = pd.DataFrame(columns=self.data.columns)\n return self.as_dataframe(empty)\n except TypeError:\n raise TypeError(\"object of type %r \" % type(index) +\n \"cannot be used as an index into a \" +\n self.__class__.__name__)\n return self.as_dataframe(self.data[index])\n # return self.as_dataframe(self.data.take(index))\n\n def __setitem__(self, index, value):\n 
\"\"\"Assign to a portion of the data.\"\"\"\n if isinstance(index, int):\n self.data.iloc[index] = value\n elif isinstance(index, str):\n self.data[index] = value\n elif (isinstance(index, tuple) and\n len(index) == 2 and\n index[1] in self.data.columns):\n self.data.loc[index] = value\n else:\n assert isinstance(index, slice) or len(index) > 0\n self.data[index] = value\n\n def __delitem__(self, index):\n return NotImplemented\n\n def __iter__(self):\n return self.data.itertuples(index=False)\n\n __next__ = next\n\n @property\n def chromosome(self):\n return self.data['chromosome']\n\n @property\n def start(self):\n return self.data['start']\n\n @property\n def end(self):\n return self.data['end']\n\n @property\n def sample_id(self):\n return self.meta.get('sample_id')\n\n # Traversal\n\n def autosomes(self, also=()):\n \"\"\"Select chromosomes w/ integer names, ignoring any 'chr' prefixes.\"\"\"\n is_auto = self.chromosome.str.match(r\"(chr)?\\d+$\", na=False)\n if not is_auto.any():\n # The autosomes, if any, are not named with plain integers\n return self\n if also:\n if isinstance(also, str):\n also = [also]\n for a_chrom in also:\n is_auto |= (self.chromosome == a_chrom)\n return self[is_auto]\n\n def by_arm(self, min_gap_size=1e5, min_arm_bins=50):\n \"\"\"Iterate over bins grouped by chromosome arm (inferred).\"\"\"\n # ENH:\n # - Accept GArray of actual centromere regions as input\n # -> find largest gap (any size) within cmere region, split there\n # - Cache centromere locations once found\n self.data.chromosome = self.data.chromosome.astype(str)\n for chrom, subtable in self.data.groupby(\"chromosome\", sort=False):\n margin = max(min_arm_bins, int(round(.1 * len(subtable))))\n if len(subtable) > 2 * margin + 1:\n # Found a candidate centromere\n gaps = (subtable.start.values[margin+1:-margin] -\n subtable.end.values[margin:-margin-1])\n cmere_idx = gaps.argmax() + margin + 1\n cmere_size = gaps[cmere_idx - margin - 1]\n else:\n cmere_idx = 0\n 
cmere_size = 0\n if cmere_idx and cmere_size >= min_gap_size:\n logging.debug(\"%s centromere at %d of %d bins (size %s)\",\n chrom, cmere_idx, len(subtable), cmere_size)\n p_arm = subtable.index[:cmere_idx]\n yield chrom, self.as_dataframe(subtable.loc[p_arm,:])\n q_arm = subtable.index[cmere_idx:]\n yield chrom, self.as_dataframe(subtable.loc[q_arm,:])\n else:\n # No centromere found -- emit the whole chromosome\n if cmere_idx:\n logging.debug(\"%s: Ignoring centromere at %d of %d bins (size %s)\",\n chrom, cmere_idx, len(subtable), cmere_size)\n else:\n logging.debug(\"%s: Skipping centromere search, too small\",\n chrom)\n yield chrom, self.as_dataframe(subtable)\n\n def by_chromosome(self):\n \"\"\"Iterate over bins grouped by chromosome name.\"\"\"\n for chrom, subtable in self.data.groupby(\"chromosome\", sort=False):\n yield chrom, self.as_dataframe(subtable)\n\n def by_ranges(self, other, mode='outer', keep_empty=True):\n \"\"\"Group rows by another GenomicArray's bin coordinate ranges.\n\n For example, this can be used to group SNVs by CNV segments.\n\n Bins in this array that fall outside the other array's bins are skipped.\n\n Parameters\n ----------\n other : GenomicArray\n Another GA instance.\n mode : string\n Determines what to do with bins that overlap a boundary of the\n selection. 
Possible values are:\n\n - ``inner``: Drop the bins on the selection boundary, don't emit them.\n - ``outer``: Keep/emit those bins as they are.\n - ``trim``: Emit those bins but alter their boundaries to match the\n selection; the bin start or end position is replaced with the\n selection boundary position.\n keep_empty : bool\n Whether to also yield `other` bins with no overlapping bins in\n `self`, or to skip them when iterating.\n\n Yields\n ------\n tuple\n (other bin, GenomicArray of overlapping rows in self)\n \"\"\"\n for bin_row, subrange in by_ranges(self.data, other.data,\n mode, keep_empty):\n if len(subrange):\n yield bin_row, self.as_dataframe(subrange)\n elif keep_empty:\n yield bin_row, self.as_rows(subrange)\n\n def coords(self, also=()):\n \"\"\"Iterate over plain coordinates of each bin: chromosome, start, end.\n\n Parameters\n ----------\n also : str, or iterable of strings\n Also include these columns from `self`, in addition to chromosome,\n start, and end.\n\n Example, yielding rows in BED format:\n\n >>> probes.coords(also=[\"gene\", \"strand\"])\n \"\"\"\n cols = list(GenomicArray._required_columns)\n if also:\n if isinstance(also, str):\n cols.append(also)\n else:\n cols.extend(also)\n coordframe = self.data.loc[:, cols]\n return coordframe.itertuples(index=False)\n\n def labels(self):\n return self.data.apply(to_label, axis=1)\n\n def in_range(self, chrom=None, start=None, end=None, mode='outer'):\n \"\"\"Get the GenomicArray portion within the given genomic range.\n\n Parameters\n ----------\n chrom : str or None\n Chromosome name to select. Use None if `self` has only one\n chromosome.\n start : int or None\n Start coordinate of range to select, in 0-based coordinates.\n If None, start from 0.\n end : int or None\n End coordinate of range to select. 
If None, select to the end of the\n chromosome.\n mode : str\n As in `by_ranges`: ``outer`` includes bins straddling the range\n boundaries, ``trim`` additionally alters the straddling bins'\n endpoints to match the range boundaries, and ``inner`` excludes\n those bins.\n\n Returns\n -------\n GenomicArray\n The subset of `self` enclosed by the specified range.\n \"\"\"\n if isinstance(start, (int, np.int64, float, np.float64)):\n start = [int(start)]\n if isinstance(end, (int, np.int64, float, np.float64)):\n end = [int(end)]\n results = iter_ranges(self.data, chrom, start, end, mode)\n return self.as_dataframe(next(results))\n\n def in_ranges(self, chrom=None, starts=None, ends=None, mode='outer'):\n \"\"\"Get the GenomicArray portion within the specified ranges.\n\n Similar to `in_ranges`, but concatenating the selections of all the\n regions specified by the `starts` and `ends` arrays.\n\n Parameters\n ----------\n chrom : str or None\n Chromosome name to select. Use None if `self` has only one\n chromosome.\n starts : int array, or None\n Start coordinates of ranges to select, in 0-based coordinates.\n If None, start from 0.\n ends : int array, or None\n End coordinates of ranges to select. If None, select to the end of the\n chromosome. 
If `starts` and `ends` are both specified, they must be\n arrays of equal length.\n mode : str\n As in `by_ranges`: ``outer`` includes bins straddling the range\n boundaries, ``trim`` additionally alters the straddling bins'\n endpoints to match the range boundaries, and ``inner`` excludes\n those bins.\n\n Returns\n -------\n GenomicArray\n Concatenation of all the subsets of `self` enclosed by the specified\n ranges.\n \"\"\"\n table = pd.concat(iter_ranges(self.data, chrom, starts, ends, mode),\n sort=False)\n return self.as_dataframe(table)\n\n def into_ranges(self, other, column, default, summary_func=None):\n \"\"\"Re-bin values from `column` into the corresponding ranges in `other`.\n\n Match overlapping/intersecting rows from `other` to each row in `self`.\n Then, within each range in `other`, extract the value(s) from `column`\n in `self`, using the function `summary_func` to produce a single value\n if multiple bins in `self` map to a single range in `other`.\n\n For example, group SNVs (self) by CNV segments (other) and calculate the\n median (summary_func) of each SNV group's allele frequencies.\n\n Parameters\n ----------\n other : GenomicArray\n Ranges into which the overlapping values of `self` will be\n summarized.\n column : string\n Column name in `self` to extract values from.\n default\n Value to assign to indices in `other` that do not overlap any bins in\n `self`. 
Type should be the same as or compatible with the output\n field specified by `column`, or the output of `summary_func`.\n summary_func : callable, dict of string-to-callable, or None\n Specify how to reduce 1 or more `other` rows into a single value for\n the corresponding row in `self`.\n\n - If callable, apply to the `column` field each group of rows in\n `other` column.\n - If a single-element dict of column name to callable, apply to that\n field in `other` instead of `column`.\n - If None, use an appropriate summarizing function for the datatype\n of the `column` column in `other` (e.g. median of numbers,\n concatenation of strings).\n - If some other value, assign that value to `self` wherever there is\n an overlap.\n\n Returns\n -------\n pd.Series\n The extracted and summarized values from `self` corresponding to\n other's genomic ranges, the same length as `other`.\n \"\"\"\n if column not in self:\n logging.warning(\"No '%s' column available for summary calculation\",\n column)\n return pd.Series(np.repeat(default, len(other)))\n return into_ranges(self.data, other.data, column, default, summary_func)\n\n def iter_ranges_of(self, other, column, mode='outer', keep_empty=True):\n \"\"\"Group rows by another GenomicArray's bin coordinate ranges.\n\n For example, this can be used to group SNVs by CNV segments.\n\n Bins in this array that fall outside the other array's bins are skipped.\n\n Parameters\n ----------\n other : GenomicArray\n Another GA instance.\n column : string\n Column name in `self` to extract values from.\n mode : string\n Determines what to do with bins that overlap a boundary of the\n selection. 
Possible values are:\n\n - ``inner``: Drop the bins on the selection boundary, don't emit them.\n - ``outer``: Keep/emit those bins as they are.\n - ``trim``: Emit those bins but alter their boundaries to match the\n selection; the bin start or end position is replaced with the\n selection boundary position.\n keep_empty : bool\n Whether to also yield `other` bins with no overlapping bins in\n `self`, or to skip them when iterating.\n\n Yields\n ------\n tuple\n (other bin, GenomicArray of overlapping rows in self)\n \"\"\"\n if column not in self.data.columns:\n raise ValueError(\"No column named %r in this object\" % column)\n ser = self.data[column]\n for slc in iter_slices(self.data, other.data, mode, keep_empty):\n yield ser[slc]\n\n # Modification\n\n def add(self, other):\n \"\"\"Combine this array's data with another GenomicArray (in-place).\n\n Any optional columns must match between both arrays.\n \"\"\"\n if not isinstance(other, self.__class__):\n raise ValueError(\"Argument (type %s) is not a %s instance\"\n % (type(other), self.__class__))\n if len(other.data):\n self.data = self.data.append(other.data, ignore_index=True)\n self.sort()\n\n def concat(self, others):\n \"\"\"Concatenate several GenomicArrays, keeping this array's metadata.\n\n This array's data table is not implicitly included in the result.\n \"\"\"\n table = pd.concat([otr.data for otr in others], ignore_index=True)\n result = self.as_dataframe(table)\n result.sort()\n return result\n\n def copy(self):\n \"\"\"Create an independent copy of this object.\"\"\"\n return self.as_dataframe(self.data.copy())\n\n def add_columns(self, **columns):\n \"\"\"Add the given columns to a copy of this GenomicArray.\n\n Parameters\n ----------\n **columns : array\n Keyword arguments where the key is the new column's name and the\n value is an array of the same length as `self` which will be the new\n column's values.\n\n Returns\n -------\n GenomicArray or subclass\n A new instance of `self` with the 
given columns included in the\n underlying dataframe.\n \"\"\"\n return self.as_dataframe(self.data.assign(**columns))\n\n def keep_columns(self, colnames):\n \"\"\"Extract a subset of columns, reusing this instance's metadata.\"\"\"\n colnames = self.data.columns.intersection(colnames)\n return self.__class__(self.data.loc[:, colnames], self.meta.copy())\n\n def drop_extra_columns(self):\n \"\"\"Remove any optional columns from this GenomicArray.\n\n Returns\n -------\n GenomicArray or subclass\n A new copy with only the minimal set of columns required by the\n class (e.g. chromosome, start, end for GenomicArray; may be more for\n subclasses).\n \"\"\"\n table = self.data.loc[:, self._required_columns]\n return self.as_dataframe(table)\n\n def filter(self, func=None, **kwargs):\n \"\"\"Take a subset of rows where the given condition is true.\n\n Parameters\n ----------\n func : callable\n A boolean function which will be applied to each row to keep rows\n where the result is True.\n **kwargs : string\n Keyword arguments like ``chromosome=\"chr7\"`` or\n ``gene=\"Antitarget\"``, which will keep rows where the keyed field\n equals the specified value.\n\n Return\n ------\n GenomicArray\n Subset of `self` where the specified condition is True.\n \"\"\"\n table = self.data\n if func is not None:\n table = table[table.apply(func, axis=1)]\n for key, val in list(kwargs.items()):\n assert key in self\n table = table[table[key] == val]\n return self.as_dataframe(table)\n\n def shuffle(self):\n \"\"\"Randomize the order of bins in this array (in-place).\"\"\"\n order = np.arange(len(self.data))\n np.random.seed(0xA5EED)\n np.random.shuffle(order)\n self.data = self.data.iloc[order]\n return order\n\n def sort(self):\n \"\"\"Sort this array's bins in-place, with smart chromosome ordering.\"\"\"\n sort_key = self.data.chromosome.apply(sorter_chrom)\n self.data = (self.data.assign(_sort_key_=sort_key)\n .sort_values(by=['_sort_key_', 'start', 'end'],\n kind='mergesort')\n 
.drop('_sort_key_', axis=1)\n .reset_index(drop=True))\n\n def sort_columns(self):\n \"\"\"Sort this array's columns in-place, per class definition.\"\"\"\n extra_cols = []\n for col in self.data.columns:\n if col not in self._required_columns:\n extra_cols.append(col)\n sorted_colnames = list(self._required_columns) + sorted(extra_cols)\n assert len(sorted_colnames) == len(self.data.columns)\n self.data = self.data.reindex(columns=sorted_colnames)\n\n # Genome arithmetic\n\n def cut(self, other, combine=None):\n \"\"\"Split this array's regions at the boundaries in `other`.\"\"\"\n # TODO\n return NotImplemented\n\n def flatten(self, combine=None, split_columns=None):\n \"\"\"Split this array's regions where they overlap.\"\"\"\n return self.as_dataframe(flatten(self.data, combine=combine,\n split_columns=split_columns))\n\n def intersection(self, other, mode='outer'):\n \"\"\"Select the bins in `self` that overlap the regions in `other`.\n\n The extra fields of `self`, but not `other`, are retained in the output.\n \"\"\"\n # TODO options for which extra fields to keep\n # by default, keep just the fields in 'table'\n if mode == 'trim':\n # Slower\n chunks = [chunk.data for _, chunk in\n self.by_ranges(other, mode=mode, keep_empty=False)]\n return self.as_dataframe(pd.concat(chunks))\n else:\n slices = iter_slices(self.data, other.data, mode, False)\n indices = np.concatenate(list(slices))\n return self.as_dataframe(self.data.loc[indices])\n\n def merge(self, bp=0, stranded=False, combine=None):\n \"\"\"Merge adjacent or overlapping regions into single rows.\n\n Similar to 'bedtools merge'.\n \"\"\"\n return self.as_dataframe(merge(self.data, bp, stranded, combine))\n\n def resize_ranges(self, bp, chrom_sizes=None):\n \"\"\"Resize each genomic bin by a fixed number of bases at each end.\n\n Bin 'start' values have a minimum of 0, and `chrom_sizes` can\n specify each chromosome's maximum 'end' value.\n\n Similar to 'bedtools slop'.\n\n Parameters\n ----------\n bp 
: int\n Number of bases in each direction to expand or shrink each bin.\n Applies to 'start' and 'end' values symmetrically, and may be\n positive (expand) or negative (shrink).\n chrom_sizes : dict of string-to-int\n Chromosome name to length in base pairs. If given, all chromosomes\n in `self` must be included.\n \"\"\"\n table = self.data\n limits = dict(lower=0)\n if chrom_sizes:\n limits['upper'] = self.chromosome.replace(chrom_sizes)\n table = table.assign(start=(table['start'] - bp).clip(**limits),\n end=(table['end'] + bp).clip(**limits))\n if bp < 0:\n # Drop any bins that now have zero or negative size\n ok_size = table['end'] - table['start'] > 0\n logging.debug(\"Dropping %d bins with size <= 0\", (~ok_size).sum())\n table = table[ok_size]\n # Don't modify the original\n return self.as_dataframe(table.copy())\n\n def squash(self, combine=None):\n \"\"\"Combine some groups of rows, by some criteria, into single rows.\"\"\"\n # TODO\n return NotImplemented\n\n def subdivide(self, avg_size, min_size=0, verbose=False):\n \"\"\"Split this array's regions into roughly equal-sized sub-regions.\"\"\"\n return self.as_dataframe(subdivide(self.data, avg_size, min_size,\n verbose))\n\n def subtract(self, other):\n \"\"\"Remove the overlapping regions in `other` from this array.\"\"\"\n return self.as_dataframe(subtract(self.data, other.data))\n\n def total_range_size(self):\n \"\"\"Total number of bases covered by all (merged) regions.\"\"\"\n if not len(self):\n return 0\n regions = merge(self.data, bp=1)\n return regions.end.sum() - regions.start.sum()\n\n def _get_gene_map(self):\n \"\"\"Map unique gene names to their indices in this array.\n\n Returns\n -------\n OrderedDict\n An (ordered) dictionary of unique gene names and the data indices of\n their segments in the order of occurrence (genomic order).\n \"\"\"\n if 'gene' not in self.data:\n return OrderedDict()\n\n genes = OrderedDict()\n for idx, genestr in self.data['gene'].iteritems():\n if 
pd.isnull(genestr):\n continue\n for gene in genestr.split(','):\n if gene not in genes:\n genes[gene] = []\n genes[gene].append(idx)\n return genes\n"
] |
[
[
"pandas.concat",
"pandas.Series",
"numpy.random.seed",
"pandas.isnull",
"numpy.random.shuffle",
"pandas.DataFrame",
"numpy.dtype",
"pandas.DataFrame.from_dict",
"pandas.DataFrame.from_records",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"1.3",
"0.19",
"1.1",
"1.5",
"0.24",
"0.20",
"1.0",
"0.25",
"1.2"
],
"scipy": [],
"tensorflow": []
}
] |
Slowika/GameBias-EmeCom2020
|
[
"5b94c47559f8202bca99c26fc1bcb078dd0509a6"
] |
[
"lib/EGG_research/egg/zoo/language_bottleneck/mnist_adv/train.py"
] |
[
"# Copyright (c) Facebook, Inc. and its affiliates.\n\n# This source code is licensed under the MIT license found in the\n# LICENSE file in the root directory of this source tree.\n\nimport argparse\nimport json\nfrom torch.nn import functional as F\nimport torch.utils.data\nfrom torchvision import datasets, transforms\nimport torch.distributions\nimport egg.core as core\n\nfrom egg.zoo.language_bottleneck.mnist_adv.archs import Sender, Receiver\nfrom egg.zoo.language_bottleneck.relaxed_channel import AlwaysRelaxedWrapper\nfrom egg.core import EarlyStopperAccuracy\nfrom egg.zoo.language_bottleneck.mnist_classification.data import DoubleMnist\n\n\ndef diff_loss_symbol(_sender_input, _message, _receiver_input, receiver_output, labels):\n loss = F.nll_loss(receiver_output, labels, reduction='none').mean()\n acc = (receiver_output.argmax(dim=1) == labels).float()\n return loss, {'acc': acc}\n\n\ndef get_params(params):\n parser = argparse.ArgumentParser()\n parser.add_argument('--temperature', type=float, default=1.0,\n help=\"GS temperature for the sender (default: 1)\")\n\n parser.add_argument('--early_stopping_thr', type=float, default=1.0,\n help=\"Early stopping threshold on accuracy (default: 1.0)\")\n\n parser.add_argument('--softmax_non_linearity', type=int, default=0,\n help=\"Disable GS training, treat channel as softmax non-linearity (default: 0)\")\n\n parser.add_argument('--linear_channel', type=int, default=0,\n help=\"Disable GS training, treat channel as a linear connection (default: 0)\")\n\n args = core.init(parser, params)\n\n assert not (args.softmax_non_linearity == 1 and args.linear_channel == 1)\n return args\n\n\ndef main(params):\n opts = get_params(params)\n print(opts)\n\n kwargs = {'num_workers': 1, 'pin_memory': True} if opts.cuda else {}\n transform = transforms.ToTensor()\n\n train_dataset = datasets.MNIST('./data', train=True, download=True,\n transform=transform)\n test_dataset = datasets.MNIST('./data', train=False, download=False,\n 
transform=transform)\n n_classes = 10\n label_mapping = torch.LongTensor([x % n_classes for x in range(100)])\n\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=opts.batch_size, shuffle=True, **kwargs)\n train_loader = DoubleMnist(train_loader, label_mapping)\n\n test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=16 * 1024, shuffle=False, **kwargs)\n test_loader = DoubleMnist(test_loader, label_mapping)\n\n\n sender = Sender(vocab_size=opts.vocab_size, linear_channel=opts.linear_channel == 1,\n softmax_channel=opts.softmax_non_linearity)\n receiver = Receiver(vocab_size=opts.vocab_size, n_classes=n_classes)\n\n if opts.softmax_non_linearity == 0 and opts.linear_channel == 0:\n sender = AlwaysRelaxedWrapper(sender, temperature=opts.temperature)\n\n game = core.SymbolGameGS(sender, receiver, diff_loss_symbol)\n\n optimizer = core.build_optimizer(game.parameters())\n\n trainer = core.Trainer(game=game, optimizer=optimizer, train_data=train_loader,\n validation_data=test_loader,\n callbacks=[core.ConsoleLogger(as_json=True, print_train_loss=True),\n EarlyStopperAccuracy(opts.early_stopping_thr)])\n\n trainer.train(n_epochs=opts.n_epochs)\n core.close()\n\nif __name__ == \"__main__\":\n import sys\n main(sys.argv[1:])\n"
] |
[
[
"torch.nn.functional.nll_loss"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
immortalFrogJiang/mars
|
[
"93c786e38bdc0fbb483282d7792379db0345a3b6",
"93c786e38bdc0fbb483282d7792379db0345a3b6",
"93c786e38bdc0fbb483282d7792379db0345a3b6"
] |
[
"mars/tensor/expressions/linalg/svd.py",
"mars/tensor/execution/arithmetic.py",
"mars/core.py"
] |
[
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport numpy as np\nfrom numpy.linalg import LinAlgError\n\nfrom .... import operands\nfrom ...core import ExecutableTuple\nfrom ..datasource import tensor as astensor\nfrom ..core import TensorOperandMixin\nfrom .core import TSQR\n\n\nclass TensorSVD(operands.SVD, TensorOperandMixin):\n def __init__(self, method=None, dtype=None, **kw):\n super(TensorSVD, self).__init__(_method=method, _dtype=dtype, **kw)\n\n @classmethod\n def _is_svd(cls):\n return True\n\n def _set_inputs(self, inputs):\n super(TensorSVD, self)._set_inputs(inputs)\n self._input = self._inputs[0]\n\n def calc_shape(self, *inputs_shape):\n x, y = inputs_shape[0]\n if x > y:\n U_shape = (x, y)\n s_shape = (y, )\n V_shape = (y, y)\n else:\n U_shape = (x, x)\n s_shape = (x, )\n V_shape = (x, y)\n return U_shape, s_shape, V_shape\n\n def __call__(self, a):\n a = astensor(a)\n\n if a.ndim != 2:\n raise LinAlgError('{0}-dimensional tensor given. 
'\n 'Tensor must be two-dimensional'.format(a.ndim))\n\n tiny_U, tiny_s, tiny_V = np.linalg.svd(np.ones((1, 1), dtype=a.dtype))\n\n # if a's shape is (6, 18), U's shape is (6, 6), s's shape is (6,), V's shape is (6, 18)\n # if a's shape is (18, 6), U's shape is (18, 6), s's shape is (6,), V's shape is (6, 6)\n x, y = a.shape\n if x > y:\n U_shape = (x, y)\n s_shape = (y, )\n V_shape = (y, y)\n else:\n U_shape = (x, x)\n s_shape = (x, )\n V_shape = (x, y)\n U, s, V = self.new_tensors([a], (U_shape, s_shape, V_shape),\n kws=[\n {'side': 'U', 'dtype': tiny_U.dtype},\n {'side': 's', 'dtype': tiny_s.dtype},\n {'side': 'V', 'dtype': tiny_V.dtype}\n ])\n return ExecutableTuple([U, s, V])\n\n @classmethod\n def tile(cls, op):\n U, s, V = op.outputs\n U_dtype, s_dtype, V_dtype = U.dtype, s.dtype, V.dtype\n U_shape, s_shape, V_shape = U.shape, s.shape, V.shape\n in_tensor = op.input\n if in_tensor.chunk_shape == (1, 1):\n in_chunk = in_tensor.chunks[0]\n chunk_op = op.copy().reset_key()\n svd_chunks = chunk_op.new_chunks([in_chunk], (U_shape, s_shape, V_shape),\n kws=[\n {'side': 'U', 'dtype': U_dtype,\n 'index': in_chunk.index},\n {'side': 's', 'dtype': s_dtype,\n 'index': in_chunk.index[1:]},\n {'side': 'V', 'dtype': V_dtype,\n 'index': in_chunk.index}\n ])\n U_chunk, s_chunk, V_chunk = svd_chunks\n\n new_op = op.copy()\n kws = [\n {'chunks': [U_chunk], 'nsplits': tuple((s,) for s in U_shape), 'dtype': U_dtype},\n {'chunks': [s_chunk], 'nsplits': tuple((s,) for s in s_shape), 'dtype': s_dtype},\n {'chunks': [V_chunk], 'nsplits': tuple((s,) for s in V_shape), 'dtype': V_dtype}\n ]\n return new_op.new_tensors(op.inputs, [U_shape, s_shape, V_shape], kws=kws)\n elif op.method == 'tsqr':\n return TSQR.tile(op)\n else:\n raise NotImplementedError('Only tsqr method supported for now')\n\n\ndef svd(a, method='tsqr'):\n \"\"\"\n Singular Value Decomposition.\n\n When `a` is a 2D tensor, it is factorized as ``u @ np.diag(s) @ vh\n = (u * s) @ vh``, where `u` and `vh` are 2D unitary 
tensors and `s` is a 1D\n tensor of `a`'s singular values. When `a` is higher-dimensional, SVD is\n applied in stacked mode as explained below.\n\n Parameters\n ----------\n a : (..., M, N) array_like\n A real or complex tensor with ``a.ndim >= 2``.\n method: {'tsqr'}, optional\n method to calculate qr factorization, tsqr as default\n\n TSQR is presented in:\n\n A. Benson, D. Gleich, and J. Demmel.\n Direct QR factorizations for tall-and-skinny matrices in\n MapReduce architectures.\n IEEE International Conference on Big Data, 2013.\n http://arxiv.org/abs/1301.1071\n\n\n Returns\n -------\n u : { (..., M, M), (..., M, K) } tensor\n Unitary tensor(s). The first ``a.ndim - 2`` dimensions have the same\n size as those of the input `a`. The size of the last two dimensions\n depends on the value of `full_matrices`. Only returned when\n `compute_uv` is True.\n s : (..., K) tensor\n Vector(s) with the singular values, within each vector sorted in\n descending order. The first ``a.ndim - 2`` dimensions have the same\n size as those of the input `a`.\n vh : { (..., N, N), (..., K, N) } tensor\n Unitary tensor(s). The first ``a.ndim - 2`` dimensions have the same\n size as those of the input `a`. The size of the last two dimensions\n depends on the value of `full_matrices`. Only returned when\n `compute_uv` is True.\n\n Raises\n ------\n LinAlgError\n If SVD computation does not converge.\n\n Notes\n -----\n\n SVD is usually described for the factorization of a 2D matrix :math:`A`.\n The higher-dimensional case will be discussed below. In the 2D case, SVD is\n written as :math:`A = U S V^H`, where :math:`A = a`, :math:`U= u`,\n :math:`S= \\\\mathtt{np.diag}(s)` and :math:`V^H = vh`. The 1D tensor `s`\n contains the singular values of `a` and `u` and `vh` are unitary. The rows\n of `vh` are the eigenvectors of :math:`A^H A` and the columns of `u` are\n the eigenvectors of :math:`A A^H`. 
In both cases the corresponding\n (possibly non-zero) eigenvalues are given by ``s**2``.\n\n If `a` has more than two dimensions, then broadcasting rules apply, as\n explained in :ref:`routines.linalg-broadcasting`. This means that SVD is\n working in \"stacked\" mode: it iterates over all indices of the first\n ``a.ndim - 2`` dimensions and for each combination SVD is applied to the\n last two indices. The matrix `a` can be reconstructed from the\n decomposition with either ``(u * s[..., None, :]) @ vh`` or\n ``u @ (s[..., None] * vh)``. (The ``@`` operator can be replaced by the\n function ``mt.matmul`` for python versions below 3.5.)\n\n Examples\n --------\n >>> import mars.tensor as mt\n >>> a = mt.random.randn(9, 6) + 1j*mt.random.randn(9, 6)\n >>> b = mt.random.randn(2, 7, 8, 3) + 1j*mt.random.randn(2, 7, 8, 3)\n\n Reconstruction based on reduced SVD, 2D case:\n\n >>> u, s, vh = mt.linalg.svd(a)\n >>> u.shape, s.shape, vh.shape\n ((9, 6), (6,), (6, 6))\n >>> np.allclose(a, np.dot(u * s, vh))\n True\n >>> smat = np.diag(s)\n >>> np.allclose(a, np.dot(u, np.dot(smat, vh)))\n True\n\n \"\"\"\n op = TensorSVD(method=method)\n return op(a)\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport operator\n\nimport numpy as np\n\nfrom .array import as_same_device, device, is_sparse_module, get_array_module\nfrom ..expressions import arithmetic\nfrom ...compat import reduce, six\n\nOP_TO_HANDLER = {\n arithmetic.TensorAdd: 'add',\n arithmetic.TensorSubtract: 'subtract',\n arithmetic.TensorMultiply: 'multiply',\n arithmetic.TensorDivide: 'divide',\n arithmetic.TensorTrueDiv: 'true_divide',\n arithmetic.TensorFloorDiv: 'floor_divide',\n arithmetic.TensorPower: 'power',\n arithmetic.TensorFloatPower: 'float_power',\n arithmetic.TensorMod: 'mod',\n arithmetic.TensorFMod: 'fmod',\n arithmetic.TensorLogAddExp: 'logaddexp',\n arithmetic.TensorLogAddExp2: 'logaddexp2',\n arithmetic.TensorNegative: 'negative',\n arithmetic.TensorPositive: operator.pos,\n arithmetic.TensorAbsolute: 'absolute',\n arithmetic.TensorAbs: 'abs',\n arithmetic.TensorFabs: 'fabs',\n arithmetic.TensorRint: 'rint',\n arithmetic.TensorSign: 'sign',\n arithmetic.TensorConj: 'conj',\n arithmetic.TensorExp: 'exp',\n arithmetic.TensorExp2: 'exp2',\n arithmetic.TensorLog: 'log',\n arithmetic.TensorLog2: 'log2',\n arithmetic.TensorLog10: 'log10',\n arithmetic.TensorExpm1: 'expm1',\n arithmetic.TensorLog1p: 'log1p',\n arithmetic.TensorSqrt: 'sqrt',\n arithmetic.TensorSquare: 'square',\n arithmetic.TensorCbrt: 'cbrt',\n arithmetic.TensorReciprocal: 
'reciprocal',\n arithmetic.TensorAround: 'around',\n arithmetic.TensorIsFinite: 'isfinite',\n arithmetic.TensorIsInf: 'isinf',\n arithmetic.TensorIsNan: 'isnan',\n arithmetic.TensorSignbit: 'signbit',\n arithmetic.TensorCopysign: 'copysign',\n arithmetic.TensorNextafter: 'nextafter',\n arithmetic.TensorSpacing: 'spacing',\n arithmetic.TensorLdexp: 'ldexp',\n arithmetic.TensorFloor: 'floor',\n arithmetic.TensorCeil: 'ceil',\n arithmetic.TensorTrunc: 'trunc',\n arithmetic.TensorDegrees: 'degrees',\n arithmetic.TensorRadians: 'radians',\n\n arithmetic.TensorEqual: 'equal',\n arithmetic.TensorNotEqual: 'not_equal',\n arithmetic.TensorLessThan: 'less',\n arithmetic.TensorLessEqual: 'less_equal',\n arithmetic.TensorGreaterThan: 'greater',\n arithmetic.TensorGreaterEqual: 'greater_equal',\n\n arithmetic.TensorSin: 'sin',\n arithmetic.TensorCos: 'cos',\n arithmetic.TensorTan: 'tan',\n arithmetic.TensorArcsin: 'arcsin',\n arithmetic.TensorArccos: 'arccos',\n arithmetic.TensorArctan: 'arctan',\n arithmetic.TensorArctan2: 'arctan2',\n arithmetic.TensorHypot: 'hypot',\n arithmetic.TensorSinh: 'sinh',\n arithmetic.TensorCosh: 'cosh',\n arithmetic.TensorTanh: 'tanh',\n arithmetic.TensorArcsinh: 'arcsinh',\n arithmetic.TensorArccosh: 'arccosh',\n arithmetic.TensorArctanh: 'arctanh',\n arithmetic.TensorDeg2rad: 'deg2rad',\n arithmetic.TensorRad2deg: 'rad2deg',\n arithmetic.TensorAngle: 'angle',\n\n arithmetic.TensorBitand: 'bitwise_and',\n arithmetic.TensorBitor: 'bitwise_or',\n arithmetic.TensorBitxor: 'bitwise_xor',\n arithmetic.TensorInvert: 'invert',\n\n arithmetic.TensorLshift: 'left_shift',\n arithmetic.TensorRshift: 'right_shift',\n\n arithmetic.TensorAnd: 'logical_and',\n arithmetic.TensorOr: 'logical_or',\n arithmetic.TensorXor: 'logical_xor',\n arithmetic.TensorNot: 'logical_not',\n\n arithmetic.TensorMaximum: 'maximum',\n arithmetic.TensorMinimum: 'minimum',\n arithmetic.TensorFMax: 'fmax',\n arithmetic.TensorFMin: 'fmin',\n\n arithmetic.TensorIsclose: 'isclose',\n\n 
arithmetic.TensorClip: 'clip',\n arithmetic.TensorIsReal: 'isreal',\n arithmetic.TensorIsComplex: 'iscomplex',\n arithmetic.TensorReal: 'real',\n arithmetic.TensorImag: 'imag',\n arithmetic.TensorFix: 'fix',\n arithmetic.TensorI0: 'i0',\n arithmetic.TensorSinc: 'sinc',\n arithmetic.TensorNanToNum: 'nan_to_num',\n}\n\n\ndef _handle_out_dtype(val, dtype):\n if val.dtype != dtype:\n return val.astype(dtype)\n return val\n\n\ndef _build_elementwise(op):\n def _handle(ctx, chunk):\n inputs, device_id, xp = as_same_device(\n [ctx[c.key] for c in chunk.inputs], device=chunk.device, ret_extra=True)\n\n if isinstance(op, six.string_types):\n func = getattr(xp, op)\n else:\n func = op\n\n with device(device_id):\n kw = {'casting': chunk.op.casting} if chunk.op.out else {}\n\n if chunk.op.out and chunk.op.where:\n inputs, kw['out'], kw['where'] = inputs[:-2], inputs[-2].copy(), inputs[-1]\n elif chunk.op.out:\n inputs, kw['out'] = inputs[:-1], inputs[-1].copy()\n\n with np.errstate(**chunk.op.err):\n if len(inputs) == 1:\n try:\n ctx[chunk.key] = _handle_out_dtype(func(inputs[0], **kw), chunk.op.dtype)\n except TypeError:\n if kw.get('where') is None:\n raise\n out, where = kw.pop('out'), kw.pop('where')\n ctx[chunk.key] = _handle_out_dtype(xp.where(where, func(inputs[0]), out),\n chunk.op.dtype)\n else:\n try:\n if is_sparse_module(xp):\n ctx[chunk.key] = _handle_out_dtype(reduce(lambda a, b: func(a, b, **kw), inputs),\n chunk.op.dtype)\n else:\n if 'out' not in kw:\n dest_value = xp.empty(chunk.shape, chunk.dtype)\n kw['out'] = dest_value\n ctx[chunk.key] = _handle_out_dtype(reduce(lambda a, b: func(a, b, **kw), inputs),\n chunk.op.dtype)\n except TypeError:\n if kw.get('where') is None:\n raise\n out, where = kw.pop('out'), kw.pop('where')\n ctx[chunk.key] = _handle_out_dtype(\n xp.where(where, reduce(lambda a, b: func(a, b), inputs), out),\n chunk.op.dtype)\n return _handle\n\n\ndef _const_elementwise(op):\n def _handle(ctx, chunk):\n if chunk.inputs is not None:\n try:\n 
_, device_id, xp = as_same_device(\n [ctx[c.key] for c in chunk.inputs], device=chunk.device, ret_extra=True)\n except KeyError:\n raise\n else:\n # all constants\n device_id, xp = -1, np\n\n if isinstance(op, six.string_types):\n func = getattr(xp, op)\n else:\n func = op\n\n get = lambda x: ctx[x.key] if hasattr(x, 'key') else x\n\n with device(device_id):\n with np.errstate(**chunk.op.err):\n ctx[chunk.key] = _handle_out_dtype(\n reduce(func, (get(chunk.op.lhs), get(chunk.op.rhs))), chunk.op.dtype)\n return _handle\n\n\ndef _around(ctx, chunk):\n (a,), device_id, xp = as_same_device(\n [ctx[c.key] for c in chunk.inputs], device=chunk.device, ret_extra=True)\n\n with device(device_id):\n ctx[chunk.key] = xp.around(a, decimals=chunk.op.decimals)\n\n\ndef _angle(ctx, chunk):\n (z,), device_id, xp = as_same_device(\n [ctx[c.key] for c in chunk.inputs], device=chunk.device, ret_extra=True)\n\n with device(device_id):\n ctx[chunk.key] = xp.angle(z, deg=chunk.op.deg)\n\n\ndef _isclose(ctx, chunk):\n (a, b), device_id, xp = as_same_device(\n [ctx[c.key] for c in chunk.inputs], device=chunk.device, ret_extra=True)\n\n with device(device_id):\n ctx[chunk.key] = xp.isclose(a, b, atol=chunk.op.atol, rtol=chunk.op.rtol,\n equal_nan=chunk.op.equal_nan)\n\n\ndef _frexp(ctx, chunk):\n inputs, device_id, xp = as_same_device(\n [ctx[c.key] for c in chunk.inputs], device=chunk.device, ret_extra=True)\n\n with device(device_id):\n kw = {'casting': chunk.op.casting}\n\n inputs_iter = iter(inputs)\n input = next(inputs_iter)\n if chunk.op.out1 is not None:\n out1 = next(inputs_iter)\n else:\n out1 = None\n if chunk.op.out2 is not None:\n out2 = next(inputs_iter)\n else:\n out2 = None\n if chunk.op.where is not None:\n where = kw['where'] = next(inputs_iter)\n else:\n where = None\n\n try:\n args = [input]\n if out1 is not None:\n args.append(out1)\n if out2 is not None:\n args.append(out2)\n mantissa, exponent = xp.frexp(*args, **kw)\n except TypeError:\n if where is None:\n raise\n 
mantissa, exponent = xp.frexp(input)\n mantissa, exponent = xp.where(where, mantissa, out1), xp.where(where, exponent, out2)\n\n for c, res in zip(chunk.op.outputs, (mantissa, exponent)):\n ctx[c.key] = res\n\n\ndef _modf(ctx, chunk):\n inputs, device_id, xp = as_same_device(\n [ctx[c.key] for c in chunk.inputs], device=chunk.device, ret_extra=True)\n\n with device(device_id):\n kw = {'casting': chunk.op.casting}\n\n inputs_iter = iter(inputs)\n input = next(inputs_iter)\n if chunk.op.out1 is not None:\n out1 = next(inputs_iter)\n else:\n out1 = None\n if chunk.op.out2 is not None:\n out2 = next(inputs_iter)\n else:\n out2 = None\n if chunk.op.where is not None:\n where = kw['where'] = next(inputs_iter)\n else:\n where = None\n\n try:\n args = [input]\n if out1 is not None:\n args.append(out1.copy())\n if out2 is not None:\n args.append(out2.copy())\n y1, y2 = xp.modf(*args, **kw)\n except TypeError:\n if where is None:\n raise\n y1, y2 = xp.modf(input)\n y1, y2 = xp.where(where, y1, out1), xp.where(where, y2, out2)\n\n for c, res in zip(chunk.op.outputs, (y1, y2)):\n ctx[c.key] = res\n\n\ndef _set_real(ctx, chunk):\n inputs, device_id, xp = as_same_device(\n [ctx[c.key] for c in chunk.inputs], device=chunk.device, ret_extra=True)\n\n if len(inputs) == 1:\n val, real = inputs[0], chunk.op.rhs\n else:\n assert len(inputs) == 2\n val, real = inputs\n\n with device(device_id):\n val = val.copy()\n val.real = real\n\n ctx[chunk.key] = val\n\n\ndef _set_imag(ctx, chunk):\n inputs, device_id, xp = as_same_device(\n [ctx[c.key] for c in chunk.inputs], device=chunk.device, ret_extra=True)\n\n if len(inputs) == 1:\n val, imag = inputs[0], chunk.op.rhs\n else:\n assert len(inputs) == 2\n val, imag = inputs\n\n with device(device_id):\n val = val.copy()\n val.imag = imag\n\n ctx[chunk.key] = val\n\n\ndef _clip(ctx, chunk):\n inputs, device_id, xp = as_same_device(\n [ctx[c.key] for c in chunk.inputs], device=chunk.device, ret_extra=True)\n\n inputs_iter = iter(inputs)\n a = 
next(inputs_iter)\n a_min = next(inputs_iter) if isinstance(chunk.op.a_min, type(chunk)) else chunk.op.a_min\n a_max = next(inputs_iter) if isinstance(chunk.op.a_max, type(chunk)) else chunk.op.a_max\n out = next(inputs_iter).copy() if chunk.op.out is not None else None\n\n with device(device_id):\n kw = {}\n if out is not None:\n kw['out'] = out\n ctx[chunk.key] = xp.clip(a, a_min, a_max, **kw)\n\n\ndef _i0(ctx, chunk):\n x = ctx[chunk.inputs[0].key]\n xp = get_array_module(x)\n res = xp.i0(x)\n if not is_sparse_module(xp):\n res = res.reshape(chunk.shape)\n ctx[chunk.key] = res\n\n\ndef _tree_add(ctx, chunk):\n inputs, device_id, xp = as_same_device(\n [ctx[c.key] for c in chunk.inputs], device=chunk.device, ret_extra=True)\n\n with device(device_id):\n ctx[chunk.key] = reduce(xp.add, inputs)\n\n\ndef _tree_multiply(ctx, chunk):\n inputs, device_id, xp = as_same_device(\n [ctx[c.key] for c in chunk.inputs], device=chunk.device, ret_extra=True)\n\n with device(device_id):\n ctx[chunk.key] = reduce(xp.multiply, inputs)\n\n\ndef register_arithmetic_handler():\n from ...executor import register\n\n for op, new_op in six.iteritems(OP_TO_HANDLER):\n register(op, _build_elementwise(new_op))\n if hasattr(op, 'constant_cls'):\n const_op = op.constant_cls()\n if const_op:\n register(const_op, _const_elementwise(OP_TO_HANDLER[op]))\n\n register(arithmetic.TensorSetReal, _set_real)\n register(arithmetic.TensorSetRealConstant, _set_real)\n register(arithmetic.TensorSetImag, _set_imag)\n register(arithmetic.TensorSetImagConstant, _set_imag)\n\n register(arithmetic.TensorTreeAdd, _tree_add)\n register(arithmetic.TensorTreeMultiply, _tree_multiply)\n register(arithmetic.TensorAround, _around)\n register(arithmetic.TensorAngle, _angle)\n register(arithmetic.TensorIsclose, _isclose)\n register(arithmetic.TensorFrexp, _frexp)\n register(arithmetic.TensorModf, _modf)\n register(arithmetic.TensorClip, _clip)\n register(arithmetic.TensorI0, _i0)\n",
"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright 1999-2018 Alibaba Group Holding Ltd.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nfrom operator import attrgetter, mul\nimport threading\nimport itertools\n\nimport numpy as np\n\nfrom .compat import six, izip, builtins, reduce\nfrom .utils import tokenize, AttributeDict, on_serialize_shape, on_deserialize_shape\nfrom .serialize import ValueType, ProviderType, Serializable, AttributeAsDict, \\\n TupleField, DictField, KeyField, BoolField, StringField\nfrom .tiles import Tilesable, handler\nfrom .graph import DAG\n\n\nclass Base(object):\n __slots__ = ()\n _no_copy_attrs_ = set()\n\n def __init__(self, *args, **kwargs):\n for slot, arg in izip(self.__slots__, args):\n object.__setattr__(self, slot, arg)\n\n for key, val in six.iteritems(kwargs):\n object.__setattr__(self, key, val)\n\n @property\n def _values_(self):\n return [getattr(self, k, None) for k in self.__slots__\n if k not in self._no_copy_attrs_]\n\n\nclass BaseWithKey(Base):\n __slots__ = '_key', '_id'\n _no_copy_attrs_ = {'_id'}\n _init_update_key_ = True\n\n def __init__(self, *args, **kwargs):\n super(BaseWithKey, self).__init__(*args, **kwargs)\n\n if self._init_update_key_ and (not hasattr(self, '_key') or not self._key):\n self.update_key()\n if not hasattr(self, '_id') or not self._id:\n self._id = str(id(self))\n\n def _obj_set(self, k, v):\n object.__setattr__(self, k, v)\n\n def update_key(self):\n self._obj_set('_key', 
tokenize(type(self), *self._values_))\n return self\n\n def reset_key(self):\n self._obj_set('_key', None)\n return self\n\n def update_id(self, new_id=None):\n new_id = new_id if new_id is not None else str(id(self))\n self._obj_set('_id', new_id)\n\n def __copy__(self):\n return self.copy()\n\n def copy(self):\n return self.copy_to(type(self)(_key=self.key))\n\n def copy_to(self, target):\n for attr in self.__slots__:\n if (attr.startswith('__') and attr.endswith('__')) or attr in self._no_copy_attrs_:\n # we don't copy id to identify that the copied one is new\n continue\n if hasattr(self, attr):\n setattr(target, attr, getattr(self, attr))\n\n return target\n\n def copy_from(self, obj):\n obj.copy_to(self)\n\n @property\n def key(self):\n return self._key\n\n @property\n def id(self):\n return self._id\n\n\nclass Entity(object):\n __slots__ = '_data',\n _allow_data_type_ = ()\n\n def __init__(self, data):\n self._check_data(data)\n self._data = data\n\n def _check_data(self, data):\n if data is not None and not isinstance(data, self._allow_data_type_):\n raise TypeError('Expect {0}, got {1}'.format(self._allow_data_type_, type(data)))\n\n @property\n def data(self):\n return self._data\n\n @data.setter\n def data(self, new_data):\n self._check_data(new_data)\n self._data = new_data\n\n def __copy__(self):\n return self.copy()\n\n def copy(self):\n self.copy_to(type(self)(None))\n\n def copy_to(self, target):\n target.data = self._data\n\n def copy_from(self, obj):\n self.data = obj.data\n\n def __getattr__(self, attr):\n return getattr(self._data, attr)\n\n def __setattr__(self, key, value):\n try:\n super(Entity, self).__setattr__(key, value)\n except AttributeError:\n return setattr(self._data, key, value)\n\n\n_threading_local = threading.local()\n\n\nclass BuildMode(object):\n def __init__(self):\n self.is_build_mode = False\n self._old_mode = None\n\n def __enter__(self):\n if self._old_mode is None:\n # check to prevent nested enter and exit\n 
self._old_mode = self.is_build_mode\n self.is_build_mode = True\n\n def __exit__(self, *_):\n if self._old_mode is not None:\n self.is_build_mode = self._old_mode\n self._old_mode = None\n\n\ndef build_mode():\n ret = getattr(_threading_local, 'build_mode', None)\n if ret is None:\n ret = BuildMode()\n _threading_local.build_mode = ret\n\n return ret\n\n\nclass SerializableWithKey(BaseWithKey, Serializable):\n _key = StringField('key')\n _id = StringField('id')\n\n\nclass AttributeAsDictKey(BaseWithKey, AttributeAsDict):\n _key = StringField('key')\n _id = StringField('id')\n\n\nclass ChunkData(SerializableWithKey):\n __slots__ = '__weakref__',\n\n # required fields\n _op = KeyField('op') # store key of operand here\n _shape = TupleField('shape', ValueType.int64,\n on_serialize=on_serialize_shape, on_deserialize=on_deserialize_shape)\n # optional fields\n _index = TupleField('index', ValueType.uint32)\n _cached = BoolField('cached')\n _params = DictField('params', key_type=ValueType.string, on_deserialize=AttributeDict)\n\n def __init__(self, *args, **kwargs):\n extras = AttributeDict((k, kwargs.pop(k)) for k in set(kwargs) - set(self.__slots__))\n kwargs['_params'] = kwargs.pop('_params', extras)\n super(ChunkData, self).__init__(*args, **kwargs)\n\n def __repr__(self):\n return 'Chunk <op={0}, key={1}>'.format(self.op.__class__.__name__, self.key)\n\n @classmethod\n def cls(cls, provider):\n if provider.type == ProviderType.protobuf:\n from .serialize.protos.chunk_pb2 import ChunkDef\n return ChunkDef\n return super(ChunkData, cls).cls(provider)\n\n @property\n def shape(self):\n return getattr(self, '_shape', None)\n\n @property\n def ndim(self):\n return len(self.shape)\n\n @property\n def index(self):\n return getattr(self, '_index', None)\n\n @property\n def op(self):\n try:\n return self._op\n except AttributeError:\n return None\n\n @property\n def cached(self):\n return getattr(self, '_cached', None)\n\n @property\n def inputs(self):\n return 
self.op.inputs\n\n @inputs.setter\n def inputs(self, new_inputs):\n self.op.inputs = new_inputs\n\n @property\n def composed(self):\n return getattr(self, '_composed', None)\n\n @property\n def device(self):\n return self.op.device\n\n def is_sparse(self):\n return self.op.is_sparse()\n\n issparse = is_sparse\n\n def update_key(self):\n object.__setattr__(self, '_key', tokenize(\n type(self), *(getattr(self, k, None) for k in self.__slots__ if k != '_index')))\n\n\nclass Chunk(Entity):\n __slots__ = ()\n _allow_data_type_ = (ChunkData,)\n\n\nclass TilesableData(SerializableWithKey, Tilesable):\n __slots__ = '__weakref__', '_siblings', '_cix'\n _no_copy_attrs_ = SerializableWithKey._no_copy_attrs_ | {'_cix'}\n\n # required fields\n _shape = TupleField('shape', ValueType.int64,\n on_serialize=on_serialize_shape, on_deserialize=on_deserialize_shape)\n _op = KeyField('op')\n # optional fields\n # `nsplits` means the sizes of chunks for each dimension\n _nsplits = TupleField('nsplits', ValueType.tuple(ValueType.uint64))\n _params = DictField('params', key_type=ValueType.string, on_deserialize=AttributeDict)\n\n def __init__(self, *args, **kwargs):\n extras = AttributeDict((k, kwargs.pop(k)) for k in set(kwargs) - set(self.__slots__))\n kwargs['_params'] = kwargs.pop('_params', extras)\n if '_nsplits' in kwargs:\n kwargs['_nsplits'] = tuple(tuple(s) for s in kwargs['_nsplits'])\n\n super(TilesableData, self).__init__(*args, **kwargs)\n\n if hasattr(self, '_chunks') and self._chunks:\n self._chunks = sorted(self._chunks, key=attrgetter('index'))\n\n @property\n def ndim(self):\n return len(self.shape)\n\n def __len__(self):\n try:\n return self.shape[0]\n except IndexError:\n if build_mode().is_build_mode:\n return 0\n raise TypeError('len() of unsized object')\n\n @property\n def shape(self):\n if hasattr(self, '_shape') and self._shape is not None:\n return self._shape\n if hasattr(self, '_nsplits') and self._nsplits is not None:\n self._shape = 
tuple(builtins.sum(nsplit) for nsplit in self._nsplits)\n return self._shape\n\n def _update_shape(self, new_shape):\n self._shape = new_shape\n\n @property\n def chunk_shape(self):\n if hasattr(self, '_nsplits') and self._nsplits is not None:\n return tuple(map(len, self._nsplits))\n\n @property\n def chunks(self):\n return getattr(self, '_chunks', None)\n\n @property\n def op(self):\n return getattr(self, '_op', None)\n\n @property\n def nsplits(self):\n return getattr(self, '_nsplits', None)\n\n @nsplits.setter\n def nsplits(self, new_nsplits):\n self._nsplits = new_nsplits\n\n @property\n def size(self):\n return np.prod(self.shape).item()\n\n @property\n def inputs(self):\n return self.op.inputs or []\n\n @inputs.setter\n def inputs(self, new_inputs):\n self.op.inputs = new_inputs\n\n @property\n def params(self):\n return self._params\n\n @property\n def cix(self):\n if self.ndim == 0:\n return ChunksIndexer(self)\n\n try:\n if getattr(self, '_cix', None) is None:\n self._cix = ChunksIndexer(self)\n return self._cix\n except (TypeError, ValueError):\n return ChunksIndexer(self)\n\n def is_coarse(self):\n return not hasattr(self, '_chunks') or self._chunks is None or len(self._chunks) == 0\n\n def tiles(self):\n return handler.tiles(self)\n\n def single_tiles(self):\n return handler.single_tiles(self)\n\n def build_graph(self, graph=None, cls=DAG, tiled=False, compose=True, executed_keys=None):\n from .tensor.expressions.utils import convert_to_fetch\n\n executed_keys = executed_keys or []\n if tiled and self.is_coarse():\n self.tiles()\n\n graph = graph if graph is not None else cls()\n keys = None\n\n if tiled:\n nodes = list(c.data for c in self.chunks)\n keys = list(c.key for c in self.chunks)\n else:\n nodes = list(self.op.outputs)\n visited = set()\n while len(nodes) > 0:\n node = nodes.pop()\n\n # replace executed tensor/chunk by tensor/chunk with fetch op\n if node.key in executed_keys:\n node = convert_to_fetch(node)\n\n visited.add(node)\n if not 
graph.contains(node):\n graph.add_node(node)\n children = node.inputs or []\n for c in children:\n if c.key in executed_keys:\n continue\n if not graph.contains(c):\n graph.add_node(c)\n if not graph.has_successor(c, node):\n graph.add_edge(c, node)\n nodes.extend([c for c in itertools.chain(*[inp.op.outputs for inp in node.inputs or []])\n if c not in visited])\n if tiled and compose:\n graph.compose(keys=keys)\n return graph\n\n def visualize(self, graph_attrs=None, node_attrs=None, **kw):\n from graphviz import Source\n\n g = self.build_graph(**kw)\n dot = g.to_dot(graph_attrs=graph_attrs, node_attrs=node_attrs)\n\n return Source(dot)\n\n\nclass ChunksIndexer(object):\n __slots__ = '_tilesable',\n\n def __init__(self, tilesable):\n self._tilesable = tilesable\n\n def __getitem__(self, item):\n if isinstance(item, tuple):\n if len(item) == 0 and self._tilesable.is_scalar():\n return self._tilesable.chunks[0]\n elif all(np.issubdtype(type(it), np.integer) for it in item):\n if len(item) != self._tilesable.ndim:\n raise ValueError('Cannot get tensor chunk by %s, expect length %d' % (\n item, self._tilesable.ndim))\n\n s = self._tilesable.chunk_shape\n item = tuple(i if i >= 0 else i + s for i, s in zip(item, s))\n idx = sum(idx * reduce(mul, s[i+1:], 1) for i, idx\n in zip(itertools.count(0), item))\n return self._tilesable._chunks[idx]\n\n raise ValueError('Cannot get tensor chunk by {0}'.format(item))\n\n\nclass TilesableOperandMixin(object):\n __slots__ = ()\n\n def check_inputs(self, inputs):\n pass\n\n def _create_chunk(self, output_idx, index, shape, **kw):\n raise NotImplementedError\n\n def new_chunks(self, inputs, shape, index=None, output_limit=None, kws=None, **kw):\n output_limit = getattr(self, 'output_limit') if output_limit is None else output_limit\n\n self.check_inputs(inputs)\n getattr(self, '_set_inputs')(inputs)\n if getattr(self, '_key', None) is None:\n getattr(self, 'update_key')() # update key when inputs are set\n\n if isinstance(shape, 
(list, tuple)) and len(shape) > 0 and isinstance(shape[0], (list, tuple)):\n if len(shape) != output_limit:\n raise ValueError('shape size must be equal to output limit, expect {0}, got {1}'.format(\n output_limit, len(shape)))\n else:\n shape = [shape] * output_limit\n\n chunks = []\n raw_index = index\n for j, s in enumerate(shape):\n create_chunk_kw = kw.copy()\n if kws:\n create_chunk_kw.update(kws[j])\n index = create_chunk_kw.pop('index', raw_index)\n chunk = self._create_chunk(j, index, s, **create_chunk_kw)\n chunks.append(chunk)\n\n setattr(self, 'outputs', chunks)\n return chunks\n\n def _create_entity(self, output_idx, shape, nsplits, chunks, **kw):\n raise NotImplementedError\n\n def new_entities(self, inputs, shape, chunks=None, nsplits=None, output_limit=None,\n kws=None, **kw):\n output_limit = getattr(self, 'output_limit') if output_limit is None else output_limit\n\n self.check_inputs(inputs)\n getattr(self, '_set_inputs')(inputs)\n if getattr(self, '_key', None) is None:\n getattr(self, 'update_key')() # update key when inputs are set\n\n if isinstance(shape, (list, tuple)) and len(shape) > 0 and isinstance(shape[0], (list, tuple)):\n if not np.isinf(output_limit) and len(shape) != output_limit:\n raise ValueError('shape size must be equal to output limit, expect {0}, got {1}'.format(\n output_limit, len(shape)))\n else:\n shape = [shape] * output_limit\n\n entities = []\n raw_chunks = chunks\n raw_nsplits = nsplits\n for j, s in enumerate(shape):\n create_tensor_kw = kw.copy()\n if kws:\n create_tensor_kw.update(kws[j])\n chunks = create_tensor_kw.pop('chunks', raw_chunks)\n nsplits = create_tensor_kw.pop('nsplits', raw_nsplits)\n entity = self._create_entity(j, s, nsplits, chunks, **create_tensor_kw)\n entities.append(entity)\n\n setattr(self, 'outputs', entities)\n if len(entities) > 1:\n # for each output tensor, hold the reference to the other outputs\n # so that either no one or everyone are gc collected\n for j, t in enumerate(entities):\n 
t.data._siblings = [tensor.data for tensor in entities[:j] + entities[j+1:]]\n return entities\n\n def new_chunk(self, inputs, shape, index=None, **kw):\n if getattr(self, 'output_limit') != 1:\n raise TypeError('cannot new chunk with more than 1 outputs')\n\n return self.new_chunks(inputs, shape, index=index, **kw)[0]\n"
] |
[
[
"numpy.ones"
],
[
"numpy.errstate"
],
[
"numpy.isinf",
"numpy.prod"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
jylong9/tf-quant-finance
|
[
"7990f016dceda8249990abec58fbb5f5e02a747e"
] |
[
"tf_quant_finance/black_scholes/vanilla_prices.py"
] |
[
"# Lint as: python3\n# Copyright 2019 Google LLC\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# https://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\"\"\"Black Scholes prices of a batch of European options.\"\"\"\n\nimport numpy as np\nimport tensorflow.compat.v2 as tf\n\n\ndef option_price(*,\n volatilities,\n strikes,\n expiries,\n spots=None,\n forwards=None,\n discount_rates=None,\n continuous_dividends=None,\n cost_of_carries=None,\n discount_factors=None,\n is_call_options=None,\n is_normal_volatility=False,\n dtype=None,\n name=None):\n \"\"\"Computes the Black Scholes price for a batch of call or put options.\n\n #### Example\n\n ```python\n # Price a batch of 5 vanilla call options.\n volatilities = np.array([0.0001, 102.0, 2.0, 0.1, 0.4])\n forwards = np.array([1.0, 2.0, 3.0, 4.0, 5.0])\n # Strikes will automatically be broadcasted to shape [5].\n strikes = np.array([3.0])\n # Expiries will be broadcast to shape [5], i.e. each option has strike=3\n # and expiry = 1.\n expiries = 1.0\n computed_prices = tff.black_scholes.option_price(\n volatilities=volatilities,\n strikes=strikes,\n expiries=expiries,\n forwards=forwards)\n # Expected print output of computed prices:\n # [ 0. 2. 2.04806848 1.00020297 2.07303131]\n ```\n\n #### References:\n [1] Hull, John C., Options, Futures and Other Derivatives. Pearson, 2018.\n [2] Wikipedia contributors. Black-Scholes model. 
Available at:\n https://en.wikipedia.org/w/index.php?title=Black%E2%80%93Scholes_model\n\n Args:\n volatilities: Real `Tensor` of any shape and dtype. The volatilities to\n expiry of the options to price.\n strikes: A real `Tensor` of the same dtype and compatible shape as\n `volatilities`. The strikes of the options to be priced.\n expiries: A real `Tensor` of same dtype and compatible shape as\n `volatilities`. The expiry of each option. The units should be such that\n `expiry * volatility**2` is dimensionless.\n spots: A real `Tensor` of any shape that broadcasts to the shape of the\n `volatilities`. The current spot price of the underlying. Either this\n argument or the `forwards` (but not both) must be supplied.\n forwards: A real `Tensor` of any shape that broadcasts to the shape of\n `volatilities`. The forwards to maturity. Either this argument or the\n `spots` must be supplied but both must not be supplied.\n discount_rates: An optional real `Tensor` of same dtype as the\n `volatilities` and of the shape that broadcasts with `volatilities`.\n If not `None`, discount factors are calculated as e^(-rT),\n where r are the discount rates, or risk free rates. At most one of\n discount_rates and discount_factors can be supplied.\n Default value: `None`, equivalent to r = 0 and discount factors = 1 when\n discount_factors also not given.\n continuous_dividends: An optional real `Tensor` of same dtype as the\n `volatilities` and of the shape that broadcasts with `volatilities`.\n If not `None`, `cost_of_carries` is calculated as r - q,\n where r are the `discount_rates` and q is `continuous_dividends`. 
Either\n this or `cost_of_carries` can be given.\n Default value: `None`, equivalent to q = 0.\n cost_of_carries: An optional real `Tensor` of same dtype as the\n `volatilities` and of the shape that broadcasts with `volatilities`.\n Cost of storing a physical commodity, the cost of interest paid when\n long, or the opportunity cost, or the cost of paying dividends when short.\n If not `None`, and `spots` is supplied, used to calculate forwards from\n `spots`: F = e^(bT) * S, where F is the forwards price, b is the cost of\n carries, T is expiries and S is the spot price. If `None`, value assumed\n to be equal to the `discount_rate` - `continuous_dividends`\n Default value: `None`, equivalent to b = r.\n discount_factors: An optional real `Tensor` of same dtype as the\n `volatilities`. If not `None`, these are the discount factors to expiry\n (i.e. e^(-rT)). Mutually exclusive with discount_rate and cost_of_carry.\n If neither is given, no discounting is applied (i.e. the undiscounted\n option price is returned). If `spots` is supplied and `discount_factors`\n is not `None` then this is also used to compute the forwards to expiry.\n At most one of discount_rates and discount_factors can be supplied.\n Default value: `None`, which maps to e^(-rT) calculated from\n discount_rates.\n is_call_options: A boolean `Tensor` of a shape compatible with\n `volatilities`. Indicates whether the option is a call (if True) or a put\n (if False). If not supplied, call options are assumed.\n is_normal_volatility: An optional Python boolean specifying whether the\n `volatilities` correspond to lognormal Black volatility (if False) or\n normal Black volatility (if True).\n Default value: False, which corresponds to lognormal volatility.\n dtype: Optional `tf.DType`. If supplied, the dtype to be used for conversion\n of any supplied non-`Tensor` arguments to `Tensor`.\n Default value: `None` which maps to the default dtype inferred by\n TensorFlow.\n name: str. 
The name for the ops created by this function.\n Default value: `None` which is mapped to the default name `option_price`.\n\n Returns:\n option_prices: A `Tensor` of the same shape as `forwards`. The Black\n Scholes price of the options.\n\n Raises:\n ValueError: If both `forwards` and `spots` are supplied or if neither is\n supplied.\n ValueError: If both `discount_rates` and `discount_factors` is supplied.\n ValueError: If both `continuous_dividends` and `cost_of_carries` is\n supplied.\n \"\"\"\n if (spots is None) == (forwards is None):\n raise ValueError('Either spots or forwards must be supplied but not both.')\n if (discount_rates is not None) and (discount_factors is not None):\n raise ValueError('At most one of discount_rates and discount_factors may '\n 'be supplied')\n if (continuous_dividends is not None) and (cost_of_carries is not None):\n raise ValueError('At most one of continuous_dividends and cost_of_carries '\n 'may be supplied')\n\n with tf.name_scope(name or 'option_price'):\n strikes = tf.convert_to_tensor(strikes, dtype=dtype, name='strikes')\n dtype = strikes.dtype\n volatilities = tf.convert_to_tensor(\n volatilities, dtype=dtype, name='volatilities')\n expiries = tf.convert_to_tensor(expiries, dtype=dtype, name='expiries')\n\n if discount_rates is not None:\n discount_rates = tf.convert_to_tensor(\n discount_rates, dtype=dtype, name='discount_rates')\n elif discount_factors is not None:\n discount_factors = tf.convert_to_tensor(\n discount_factors, dtype=dtype, name='discount_factors')\n discount_rates = -tf.math.log(discount_factors) / expiries\n else:\n discount_rates = tf.convert_to_tensor(\n 0.0, dtype=dtype, name='discount_rates')\n\n if continuous_dividends is None:\n continuous_dividends = tf.convert_to_tensor(\n 0.0, dtype=dtype, name='continuous_dividends')\n\n if cost_of_carries is not None:\n cost_of_carries = tf.convert_to_tensor(\n cost_of_carries, dtype=dtype, name='cost_of_carries')\n else:\n cost_of_carries = 
discount_rates - continuous_dividends\n\n if discount_factors is not None:\n discount_factors = tf.convert_to_tensor(\n discount_factors, dtype=dtype, name='discount_factors')\n else:\n discount_factors = tf.exp(-discount_rates * expiries)\n\n if forwards is not None:\n forwards = tf.convert_to_tensor(forwards, dtype=dtype, name='forwards')\n else:\n spots = tf.convert_to_tensor(spots, dtype=dtype, name='spots')\n forwards = spots * tf.exp(cost_of_carries * expiries)\n\n sqrt_var = volatilities * tf.math.sqrt(expiries)\n if not is_normal_volatility: # lognormal model\n d1 = tf.math.divide_no_nan(tf.math.log(forwards / strikes),\n sqrt_var) + sqrt_var / 2\n d2 = d1 - sqrt_var\n undiscounted_calls = tf.where(sqrt_var > 0,\n forwards * _ncdf(d1) - strikes * _ncdf(d2),\n tf.maximum(forwards - strikes, 0.0))\n else: # normal model\n d1 = tf.math.divide_no_nan((forwards - strikes), sqrt_var)\n undiscounted_calls = tf.where(\n sqrt_var > 0.0, (forwards - strikes) * _ncdf(d1) +\n sqrt_var * tf.math.exp(-0.5 * d1**2) / np.sqrt(2 * np.pi),\n tf.maximum(forwards - strikes, 0.0))\n\n if is_call_options is None:\n return discount_factors * undiscounted_calls\n undiscounted_forward = forwards - strikes\n undiscounted_puts = undiscounted_calls - undiscounted_forward\n predicate = tf.broadcast_to(is_call_options, tf.shape(undiscounted_calls))\n return discount_factors * tf.where(predicate, undiscounted_calls,\n undiscounted_puts)\n\n\ndef barrier_price(*,\n volatilities,\n strikes,\n expiries,\n spots,\n barriers,\n rebates=None,\n discount_rates=None,\n continuous_dividends=None,\n cost_of_carries=None,\n is_barrier_down=None,\n is_knock_out=None,\n is_call_options=None,\n dtype=None,\n name=None):\n \"\"\"Prices barrier options in a Black-Scholes Model.\n\n Computes the prices of options with a single barrier in Black-Scholes world as\n described in Ref. [1]. Note that the barrier is applied continuously.\n\n #### Example\n\n This example is taken from Ref. 
[2], Page 154.\n\n ```python\n import tf_quant_finance as tff\n\n dtype = np.float32\n discount_rates = np.array([.08, .08])\n continuous_dividends = np.array([.04, .04])\n spots = np.array([100., 100.])\n strikes = np.array([90., 90.])\n barriers = np.array([95. 95.])\n rebates = np.array([3. 3.])\n volatilities = np.array([.25, .25])\n expiries = np.array([.5, .5])\n barriers_type = np.array([5, 1])\n is_barrier_down = np.array([True, False])\n is_knock_out = np.array([False, False])\n is_call_option = np.array([True, True])\n\n price = tff.black_scholes.barrier_price(\n discount_rates, continuous_dividends, spots, strikes,\n barriers, rebates, volatilities,\n expiries, is_barrier_down, is_knock_out, is_call_options)\n\n # Expected output\n # `Tensor` with values [9.024, 7.7627]\n ```\n\n #### References\n\n [1]: Lee Clewlow, Javier Llanos, Chris Strickland, Caracas Venezuela\n Pricing Exotic Options in a Black-Scholes World, 1994\n https://warwick.ac.uk/fac/soc/wbs/subjects/finance/research/wpaperseries/1994/94-54.pdf\n [2]: Espen Gaarder Haug, The Complete Guide to Option Pricing Formulas,\n 2nd Edition, 1997\n\n Args:\n volatilities: Real `Tensor` of any shape and dtype. The volatilities to\n expiry of the options to price.\n strikes: A real `Tensor` of the same dtype and compatible shape as\n `volatilities`. The strikes of the options to be priced.\n expiries: A real `Tensor` of same dtype and compatible shape as\n `volatilities`. The expiry of each option. The units should be such that\n `expiry * volatility**2` is dimensionless.\n spots: A real `Tensor` of any shape that broadcasts to the shape of the\n `volatilities`. The current spot price of the underlying.\n barriers: A real `Tensor` of same dtype as the `volatilities` and of the\n shape that broadcasts with `volatilities`. The barriers of each option.\n rebates: A real `Tensor` of same dtype as the `volatilities` and of the\n shape that broadcasts with `volatilities`. 
For knockouts, this is a\n fixed cash payout in case the barrier is breached. For knockins, this is a\n fixed cash payout in case the barrier level is not breached. In the former\n case, the rebate is paid immediately on breach whereas in the latter, the\n rebate is paid at the expiry of the option.\n Default value: `None` which maps to no rebates.\n discount_rates: A real `Tensor` of same dtype as the\n `volatilities` and of the shape that broadcasts with `volatilities`.\n Discount rates, or risk free rates.\n Default value: `None`, equivalent to discount_rate = 0.\n continuous_dividends: A real `Tensor` of same dtype as the\n `volatilities` and of the shape that broadcasts with `volatilities`. A\n continuous dividend rate paid by the underlier. If `None`, then\n defaults to zero dividends.\n Default value: `None`, equivalent to zero dividends.\n cost_of_carries: A optional real `Tensor` of same dtype as the\n `volatilities` and of the shape that broadcasts with `volatilities`.\n Cost of storing a physical commodity, the cost of interest paid when\n long, or the opportunity cost, or the cost of paying dividends when short.\n If not `None`, `continuous_dividends` is calculated as r - c,\n where r are the `discount_rates` and c is `cost_of_carries`.\n is_barrier_down: A real `Tensor` of `boolean` values and of the shape\n that broadcasts with `volatilities`. True if barrier is below asset\n price at expiration.\n Default value: `True`.\n is_knock_out: A real `Tensor` of `boolean` values and of the shape\n that broadcasts with `volatilities`. True if option is knock out\n else false.\n Default value: `True`.\n is_call_options: A real `Tensor` of `boolean` values and of the shape\n that broadcasts with `volatilities`. True if option is call else\n false.\n Default value: `True`.\n dtype: Optional `tf.DType`. 
If supplied, the dtype to be used for conversion\n of any supplied non-`Tensor` arguments to `Tensor`.\n Default value: `None` which maps to the default dtype inferred by\n TensorFlow.\n name: str. The name for the ops created by this function.\n Default value: `None` which is mapped to the default name `barrier_price`.\n Returns:\n option_prices: A `Tensor` of same shape as `spots`. The approximate price of\n the barriers option under black scholes.\n \"\"\"\n # The computation is done as in Ref [2] where each integral is split into\n # two matrices. The first matrix contains the algebraic terms and the second\n # matrix contains the probability distribution terms. Masks are used to filter\n # appropriate terms for calculating the integral. Then a dot product of each\n # row in the matricies coupled with the masks work to calculate the prices of\n # the barriers option.\n if (continuous_dividends is not None) and (cost_of_carries is not None):\n raise ValueError('At most one of continuous_dividends and cost of carries '\n 'may be supplied')\n with tf.name_scope(name or 'barrier_price'):\n spots = tf.convert_to_tensor(spots, dtype=dtype, name='spots')\n dtype = spots.dtype\n strikes = tf.convert_to_tensor(strikes, dtype=dtype, name='strikes')\n volatilities = tf.convert_to_tensor(\n volatilities, dtype=dtype, name='volatilities')\n expiries = tf.convert_to_tensor(expiries, dtype=dtype, name='expiries')\n barriers = tf.convert_to_tensor(barriers, dtype=dtype, name='barriers')\n if rebates is not None:\n rebates = tf.convert_to_tensor(rebates, dtype=dtype, name='rebates')\n else:\n rebates = tf.zeros_like(spots, dtype=dtype, name='rebates')\n\n # Convert all to tensor and enforce float dtype where required\n if discount_rates is not None:\n discount_rates = tf.convert_to_tensor(\n discount_rates, dtype=dtype, name='discount_rates')\n else:\n discount_rates = tf.convert_to_tensor(\n 0.0, dtype=dtype, name='discount_rates')\n\n if continuous_dividends is None:\n 
continuous_dividends = tf.convert_to_tensor(\n 0.0, dtype=dtype, name='continuous_dividends')\n\n if cost_of_carries is not None:\n cost_of_carries = tf.convert_to_tensor(\n cost_of_carries, dtype=dtype, name='cost_of_carries')\n else:\n cost_of_carries = discount_rates - continuous_dividends\n\n if is_barrier_down is None:\n is_barrier_down = tf.constant(1, name='is_barrier_down')\n else:\n is_barrier_down = tf.convert_to_tensor(is_barrier_down, dtype=tf.bool,\n name='is_barrier_down')\n is_barrier_down = tf.where(is_barrier_down, 1, 0)\n if is_knock_out is None:\n is_knock_out = tf.constant(1, name='is_knock_out')\n else:\n is_knock_out = tf.convert_to_tensor(is_knock_out, dtype=tf.bool,\n name='is_knock_out')\n is_knock_out = tf.where(is_knock_out, 1, 0)\n if is_call_options is None:\n is_call_options = tf.constant(1, name='is_call_options')\n else:\n is_call_options = tf.convert_to_tensor(is_call_options, dtype=tf.bool,\n name='is_call_options')\n is_call_options = tf.where(is_call_options, 1, 0)\n\n # Indices which range from 0-7 are used to select the appropriate\n # mask for each barrier\n indices = tf.bitwise.left_shift(\n is_barrier_down, 2) + tf.bitwise.left_shift(\n is_knock_out, 1) + is_call_options\n\n # Masks select the appropriate terms for integral approximations\n # Integrals are seperated by algebraic terms and probability\n # distribution terms. 
This give 12 different terms per matrix\n # (6 integrals, 2 terms each)\n # shape = [8, 12]\n mask_matrix_greater_strike = tf.constant([\n [1, 1, -1, -1, 0, 0, 1, 1, 1, 1, 0, 0], # up and in put\n [1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0], # up and in call\n [0, 0, 1, 1, 0, 0, -1, -1, 0, 0, 1, 1], # up and out put\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1], # up and out call\n [0, 0, 1, 1, -1, -1, 1, 1, 0, 0, 1, 1], # down and in put\n [0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0], # down and in call\n [1, 1, -1, -1, 1, 1, -1, -1, 0, 0, 1, 1], # down and out put\n [1, 1, 0, 0, -1, -1, 0, 0, 0, 0, 1, 1]]) # down and out call\n\n mask_matrix_lower_strike = tf.constant([\n [0, 0, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0], # up and in put\n [0, 0, 1, 1, -1, -1, 1, 1, 1, 1, 0, 0], # up and in call\n [1, 1, 0, 0, -1, -1, 0, 0, 0, 0, 1, 1], # up and out put\n [1, 1, -1, -1, 1, 1, -1, -1, 0, 0, 1, 1], # up and out call\n [1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0], # down and in put\n [1, 1, -1, -1, 0, 0, 1, 1, 1, 1, 0, 0], # down and in call\n [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1], # down and out put\n [0, 0, 1, 1, 0, 0, -1, -1, 0, 0, 1, 1]]) # down and out call\n\n # Create masks\n # Masks are shape [strikes.shape, 12]\n masks_lower = tf.gather(mask_matrix_lower_strike, indices, axis=0)\n masks_greater = tf.gather(mask_matrix_greater_strike, indices, axis=0)\n strikes_greater = tf.expand_dims(strikes > barriers, axis=-1)\n masks = tf.where(strikes_greater, masks_greater, masks_lower)\n masks = tf.cast(masks, dtype=dtype)\n one = tf.constant(1, dtype=dtype)\n call_or_put = tf.cast(tf.where(tf.equal(is_call_options, 0), -one, one),\n dtype=dtype)\n below_or_above = tf.cast(tf.where(tf.equal(is_barrier_down, 0), -one, one),\n dtype=dtype)\n\n # Calculate params for integrals\n sqrt_var = volatilities * tf.math.sqrt(expiries)\n mu = (cost_of_carries) - ((volatilities**2) / 2)\n lamda = 1 + (mu / (volatilities**2))\n x = (tf.math.log(spots / strikes) / (sqrt_var)) + (lamda * sqrt_var)\n x1 = (tf.math.log(spots / 
barriers) / (sqrt_var)) + (lamda * sqrt_var)\n y = (tf.math.log((barriers**2) / (spots * strikes)) / (\n sqrt_var)) + (lamda * sqrt_var)\n y1 = (tf.math.log(barriers / spots) / (sqrt_var)) + (lamda * sqrt_var)\n b = ((mu**2) + (2 * (volatilities**2) * discount_rates)) / (volatilities**2)\n z = (tf.math.log(barriers / spots) / (sqrt_var)) + (b * sqrt_var)\n a = mu / (volatilities**2)\n\n # Other params used for integrals\n discount_rates_exponent = tf.math.exp(-discount_rates * expiries,\n name='discount_rates_exponent')\n continuous_dividends_exponent = tf.math.exp(\n (cost_of_carries-discount_rates) * expiries,\n name='continuous_dividends_exponent')\n barriers_ratio = tf.math.divide(barriers, spots, name='barriers_ratio')\n spots_term = call_or_put * spots * continuous_dividends_exponent\n strikes_term = call_or_put * strikes * discount_rates_exponent\n\n # rank is used to stack elements and reduce_sum\n strike_rank = strikes.shape.rank\n\n # Constructing Matrix with first and second algebraic terms for each\n # integral [strike.shape, 12]\n terms_mat = tf.stack(\n (spots_term, -strikes_term,\n spots_term, -strikes_term,\n spots_term * (barriers_ratio**(2 * lamda)),\n -strikes_term * (barriers_ratio**((2 * lamda) - 2)),\n spots_term * (barriers_ratio**(2 * lamda)),\n -strikes_term * (barriers_ratio**((2 * lamda) - 2)),\n rebates * discount_rates_exponent,\n -rebates * discount_rates_exponent * (\n barriers_ratio**((2 * lamda) - 2)),\n rebates * (barriers_ratio**(a + b)),\n rebates * (barriers_ratio**(a - b))),\n name='term_matrix', axis=strike_rank)\n\n # Constructing Matrix with first and second norm for each integral\n # [strikes.shape, 12]\n cdf_mat = tf.stack(\n (call_or_put * x,\n call_or_put * (x - sqrt_var),\n call_or_put * x1,\n call_or_put * (x1 - sqrt_var),\n below_or_above * y,\n below_or_above * (y - sqrt_var),\n below_or_above * y1,\n below_or_above * (y1 - sqrt_var),\n below_or_above * (x1 - sqrt_var),\n below_or_above * (y1 - sqrt_var),\n 
below_or_above * z,\n below_or_above * (z - (2 * b * sqrt_var))),\n name='cdf_matrix', axis=strike_rank)\n cdf_mat = _ncdf(cdf_mat)\n # Calculating and returning price for each option\n return tf.reduce_sum(masks * terms_mat * cdf_mat, axis=strike_rank)\n\n\n# TODO(b/154806390): Binary price signature should be the same as that of the\n# vanilla price.\ndef binary_price(*,\n volatilities,\n strikes,\n expiries,\n spots=None,\n forwards=None,\n discount_factors=None,\n is_call_options=None,\n dtype=None,\n name=None):\n \"\"\"Computes the Black Scholes price for a batch of binary call or put options.\n\n The binary call (resp. put) option priced here is that which pays off a unit\n of cash if the underlying asset has a value greater (resp. smaller) than the\n strike price at expiry. Hence the binary option price is the discounted\n probability that the asset will end up higher (resp. lower) than the\n strike price at expiry.\n\n #### Example\n\n ```python\n # Price a batch of 5 binary call options.\n volatilities = np.array([0.0001, 102.0, 2.0, 0.1, 0.4])\n forwards = np.array([1.0, 2.0, 3.0, 4.0, 5.0])\n # Strikes will automatically be broadcasted to shape [5].\n strikes = np.array([3.0])\n # Expiries will be broadcast to shape [5], i.e. each option has strike=3\n # and expiry = 1.\n expiries = 1.0\n computed_prices = tff.black_scholes.binary_price(\n volatilities=volatilities,\n strikes=strikes,\n expiries=expiries,\n forwards=forwards)\n # Expected print output of prices:\n # [0. 0. 0.15865525 0.99764937 0.85927418]\n ```\n\n #### References:\n\n [1] Hull, John C., Options, Futures and Other Derivatives. Pearson, 2018.\n [2] Wikipedia contributors. Binary option. Available at:\n https://en.wikipedia.org/w/index.php?title=Binary_option\n\n Args:\n volatilities: Real `Tensor` of any shape and dtype. The volatilities to\n expiry of the options to price.\n strikes: A real `Tensor` of the same dtype and compatible shape as\n `volatilities`. 
The strikes of the options to be priced.\n expiries: A real `Tensor` of same dtype and compatible shape as\n `volatilities`. The expiry of each option. The units should be such that\n `expiry * volatility**2` is dimensionless.\n spots: A real `Tensor` of any shape that broadcasts to the shape of the\n `volatilities`. The current spot price of the underlying. Either this\n argument or the `forwards` (but not both) must be supplied.\n forwards: A real `Tensor` of any shape that broadcasts to the shape of\n `volatilities`. The forwards to maturity. Either this argument or the\n `spots` must be supplied but both must not be supplied.\n discount_factors: An optional real `Tensor` of same dtype as the\n `volatilities`. If not None, these are the discount factors to expiry\n (i.e. e^(-rT)). If None, no discounting is applied (i.e. the undiscounted\n option price is returned). If `spots` is supplied and `discount_factors`\n is not None then this is also used to compute the forwards to expiry.\n Default value: None, equivalent to discount factors = 1.\n is_call_options: A boolean `Tensor` of a shape compatible with\n `volatilities`. Indicates whether the option is a call (if True) or a put\n (if False). If not supplied, call options are assumed.\n dtype: Optional `tf.DType`. If supplied, the dtype to be used for conversion\n of any supplied non-`Tensor` arguments to `Tensor`.\n Default value: None which maps to the default dtype inferred by TensorFlow\n (float32).\n name: str. The name for the ops created by this function.\n Default value: None which is mapped to the default name `binary_price`.\n\n Returns:\n binary_prices: A `Tensor` of the same shape as `forwards`. 
The Black\n Scholes price of the binary options.\n\n Raises:\n ValueError: If both `forwards` and `spots` are supplied or if neither is\n supplied.\n \"\"\"\n if (spots is None) == (forwards is None):\n raise ValueError('Either spots or forwards must be supplied but not both.')\n\n with tf.name_scope(name or 'binary_price'):\n strikes = tf.convert_to_tensor(strikes, dtype=dtype, name='strikes')\n dtype = strikes.dtype\n volatilities = tf.convert_to_tensor(\n volatilities, dtype=dtype, name='volatilities')\n expiries = tf.convert_to_tensor(expiries, dtype=dtype, name='expiries')\n\n if discount_factors is None:\n discount_factors = tf.convert_to_tensor(\n 1.0, dtype=dtype, name='discount_factors')\n else:\n discount_factors = tf.convert_to_tensor(\n discount_factors, dtype=dtype, name='discount_factors')\n\n if forwards is not None:\n forwards = tf.convert_to_tensor(forwards, dtype=dtype, name='forwards')\n else:\n spots = tf.convert_to_tensor(spots, dtype=dtype, name='spots')\n forwards = spots / discount_factors\n\n sqrt_var = volatilities * tf.math.sqrt(expiries)\n d1 = (tf.math.log(forwards / strikes) + sqrt_var * sqrt_var / 2) / sqrt_var\n d2 = d1 - sqrt_var\n undiscounted_calls = _ncdf(d2)\n if is_call_options is None:\n return discount_factors * undiscounted_calls\n is_call_options = tf.convert_to_tensor(is_call_options,\n dtype=tf.bool,\n name='is_call_options')\n undiscounted_puts = 1 - undiscounted_calls\n predicate = tf.broadcast_to(is_call_options, tf.shape(undiscounted_calls))\n return discount_factors * tf.where(predicate, undiscounted_calls,\n undiscounted_puts)\n\n\ndef swaption_price(*,\n volatilities,\n expiries,\n floating_leg_start_times,\n floating_leg_end_times,\n fixed_leg_payment_times,\n floating_leg_daycount_fractions,\n fixed_leg_daycount_fractions,\n fixed_leg_coupon,\n floating_leg_start_times_discount_factors,\n floating_leg_end_times_discount_factors,\n fixed_leg_payment_times_discount_factors,\n notional=None,\n 
is_payer_swaption=None,\n is_normal_volatility=True,\n dtype=None,\n name=None):\n \"\"\"Calculates the price of European Swaptions using the Black model.\n\n A European Swaption is a contract that gives the holder an option to enter a\n swap contract at a future date at a prespecified fixed rate. A swaption that\n grants the holder to pay fixed rate and receive floating rate is called a\n payer swaption while the swaption that grants the holder to receive fixed and\n pay floating payments is called the receiver swaption. Typically the start\n date (or the inception date) of the swap coincides with the expiry of the\n swaption.\n\n #### Example\n The example shows how value a batch of 1y x 1y and 1y x 2y swaptions using the\n Black (normal) model for the swap rate.\n\n ````python\n import numpy as np\n import tensorflow.compat.v2 as tf\n import tf_quant_finance as tff\n\n dtype = tf.float64\n\n volatilities = [0.01, 0.005]\n expiries = [1.0, 1.0]\n float_leg_start_times = [[1.0, 1.25, 1.5, 1.75, 2.0, 2.0, 2.0, 2.0],\n [1.0, 1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75]]\n float_leg_end_times = [[1.25, 1.5, 1.75, 2.0, 2.0, 2.0, 2.0, 2.0],\n [1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]]\n fixed_leg_payment_times = [[1.25, 1.5, 1.75, 2.0, 2.0, 2.0, 2.0, 2.0],\n [1.25, 1.5, 1.75, 2.0, 2.25, 2.5, 2.75, 3.0]]\n float_leg_daycount_fractions = [[0.25, 0.25, 0.25, 0.25, 0.0, 0.0, 0.0, 0.0],\n [0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,\n 0.25]]\n fixed_leg_daycount_fractions = [[0.25, 0.25, 0.25, 0.25, 0.0, 0.0, 0.0, 0.0],\n [0.25, 0.25, 0.25, 0.25, 0.25, 0.25, 0.25,\n 0.25]]\n fixed_leg_coupon = [0.011, 0.011]\n discount_fn = lambda x: np.exp(-0.01 * np.array(x))\n price = self.evaluate(\n tff.black_scholes.swaption_price(\n volatilities=volatilities,\n expiries=expiries,\n floating_leg_start_times=float_leg_start_times,\n floating_leg_end_times=float_leg_end_times,\n fixed_leg_payment_times=fixed_leg_payment_times,\n floating_leg_daycount_fractions=float_leg_daycount_fractions,\n 
fixed_leg_daycount_fractions=fixed_leg_daycount_fractions,\n fixed_leg_coupon=fixed_leg_coupon,\n floating_leg_start_times_discount_factors=discount_fn(\n float_leg_start_times),\n floating_leg_end_times_discount_factors=discount_fn(\n float_leg_end_times),\n fixed_leg_payment_times_discount_factors=discount_fn(\n fixed_leg_payment_times),\n is_normal_volatility=is_normal_model,\n notional=100.,\n dtype=dtype))\n # Expected value: [0.3458467885511461, 0.3014786656395892] # shape = (2,)\n ````\n\n Args:\n volatilities: Real `Tensor` of any shape and dtype. The Black volatilities\n of the swaptions to price. The shape of this input determines the number\n (and shape) of swaptions to be priced and the shape of the output.\n expiries: A real `Tensor` of same shape and dtype as `volatilities`. The\n time to expiration of the swaptions.\n floating_leg_start_times: A real `Tensor` of the same dtype as\n `volatilities`. The times when accrual begins for each payment in the\n floating leg. The shape of this input should be `expiries.shape + [m]` or\n `batch_shape + [m]` where `m` denotes the number of floating payments in\n each leg.\n floating_leg_end_times: A real `Tensor` of the same dtype as `volatilities`.\n The times when accrual ends for each payment in the floating leg. The\n shape of this input should be `batch_shape + [m]` where `m` denotes\n the number of floating payments in each leg.\n fixed_leg_payment_times: A real `Tensor` of the same dtype as\n `volatilities`. The payment times for each payment in the fixed leg.\n The shape of this input should be `batch_shape + [n]` where `n` denotes\n the number of fixed payments in each leg.\n floating_leg_daycount_fractions: A real `Tensor` of the same dtype and\n compatible shape as `floating_leg_start_times`. The daycount fractions\n for each payment in the floating leg.\n fixed_leg_daycount_fractions: A real `Tensor` of the same dtype and\n compatible shape as `fixed_leg_payment_times`. 
The daycount fractions\n for each payment in the fixed leg.\n fixed_leg_coupon: A real `Tensor` of the same dtype and shape compatible\n to `batch_shape`. The fixed coupon rate for each payment in the fixed leg.\n floating_leg_start_times_discount_factors: A real `Tensor` of the same\n shape and dtype as `floating_leg_start_times`. The discount factors\n corresponding to `floating_leg_start_times`.\n floating_leg_end_times_discount_factors: A real `Tensor` of the same\n shape and dtype as `floating_leg_end_times`. The discount factors\n corresponding to `floating_leg_end_times`.\n fixed_leg_payment_times_discount_factors: A real `Tensor` of the same\n shape and dtype as `fixed_leg_payment_times`. The discount factors\n corresponding to `fixed_leg_payment_times`.\n notional: An optional `Tensor` of same dtype and compatible shape as\n `volatilities` specifying the notional amount for the underlying swap.\n Default value: None in which case the notional is set to 1.\n is_payer_swaption: A boolean `Tensor` of a shape compatible with `expiries`.\n Indicates whether the swaption is a payer (if True) or a receiver\n (if False) swaption. If not supplied, payer swaptions are assumed.\n is_normal_volatility: An optional Python boolean specifying whether the\n `volatilities` correspond to normal Black volatility (if True) or\n lognormal Black volatility (if False).\n Default value: True, which corresponds to normal volatility.\n dtype: The default dtype to use when converting values to `Tensor`s.\n Default value: `None` which means that default dtypes inferred by\n TensorFlow are used.\n name: Python string. 
The name to give to the ops created by this function.\n Default value: `None` which maps to the default name\n `hw_swaption_price`.\n\n Returns:\n A `Tensor` of real dtype and shape `batch_shape` containing the\n computed swaption prices.\n \"\"\"\n name = name or 'black_swaption_price'\n del floating_leg_daycount_fractions\n with tf.name_scope(name):\n volatilities = tf.convert_to_tensor(volatilities, dtype=dtype,\n name='volatilities')\n dtype = dtype or volatilities.dtype\n expiries = tf.convert_to_tensor(expiries, dtype=dtype, name='expiries')\n floating_leg_start_times = tf.convert_to_tensor(\n floating_leg_start_times, dtype=dtype, name='float_leg_start_times')\n floating_leg_end_times = tf.convert_to_tensor(\n floating_leg_end_times, dtype=dtype, name='float_leg_end_times')\n fixed_leg_payment_times = tf.convert_to_tensor(\n fixed_leg_payment_times, dtype=dtype, name='fixed_leg_payment_times')\n fixed_leg_daycount_fractions = tf.convert_to_tensor(\n fixed_leg_daycount_fractions, dtype=dtype,\n name='fixed_leg_daycount_fractions')\n fixed_leg_coupon = tf.convert_to_tensor(\n fixed_leg_coupon, dtype=dtype, name='fixed_leg_coupon')\n float_leg_start_times_discount_factors = tf.convert_to_tensor(\n floating_leg_start_times_discount_factors, dtype=dtype,\n name='float_leg_start_times_discount_factors')\n float_leg_end_times_discount_factors = tf.convert_to_tensor(\n floating_leg_end_times_discount_factors, dtype=dtype,\n name='float_leg_end_times_discount_factors')\n fixed_leg_payment_times_discount_factors = tf.convert_to_tensor(\n fixed_leg_payment_times_discount_factors, dtype=dtype,\n name='fixed_leg_payment_times_discount_factors')\n\n notional = tf.convert_to_tensor(notional, dtype=dtype, name='notional')\n if is_payer_swaption is None:\n is_payer_swaption = True\n is_payer_swaption = tf.convert_to_tensor(\n is_payer_swaption, dtype=tf.bool, name='is_payer_swaption')\n\n swap_annuity = tf.math.reduce_sum(\n fixed_leg_daycount_fractions * 
fixed_leg_payment_times_discount_factors,\n axis=-1)\n forward_swap_rate = tf.math.reduce_sum(\n float_leg_start_times_discount_factors -\n float_leg_end_times_discount_factors, axis=-1) / swap_annuity\n swaption_value = option_price(volatilities=volatilities,\n strikes=fixed_leg_coupon,\n expiries=expiries,\n forwards=forward_swap_rate,\n is_call_options=is_payer_swaption,\n is_normal_volatility=is_normal_volatility,\n dtype=dtype,\n name=name + '_option_price')\n return notional * swap_annuity * swaption_value\n\n\ndef _ncdf(x):\n return (tf.math.erf(x / _SQRT_2) + 1) / 2\n\n\n_SQRT_2 = np.sqrt(2.0, dtype=np.float64)\n"
] |
[
[
"tensorflow.compat.v2.exp",
"tensorflow.compat.v2.math.erf",
"numpy.sqrt",
"tensorflow.compat.v2.bitwise.left_shift",
"tensorflow.compat.v2.shape",
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.reduce_sum",
"tensorflow.compat.v2.math.exp",
"tensorflow.compat.v2.name_scope",
"tensorflow.compat.v2.where",
"tensorflow.compat.v2.math.reduce_sum",
"tensorflow.compat.v2.stack",
"tensorflow.compat.v2.expand_dims",
"tensorflow.compat.v2.gather",
"tensorflow.compat.v2.math.log",
"tensorflow.compat.v2.math.divide",
"tensorflow.compat.v2.equal",
"tensorflow.compat.v2.math.sqrt",
"tensorflow.compat.v2.math.divide_no_nan",
"tensorflow.compat.v2.constant",
"tensorflow.compat.v2.zeros_like",
"tensorflow.compat.v2.maximum",
"tensorflow.compat.v2.cast"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
SteffenBauer/Deep_RL
|
[
"6671c723098037cef1013af9a7f434df993c9d91",
"6671c723098037cef1013af9a7f434df993c9d91"
] |
[
"train_tromis.py",
"rl/agents/pg.py"
] |
[
"#!/usr/bin/env python3\n\nimport os\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\nos.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'\nimport tensorflow as tf\nimport tensorflow.keras as keras\ntf.get_logger().setLevel('ERROR')\n\nfrom rl.games import tromis\nfrom rl.agents import dqn\nfrom rl.memory import uniqmemory\nfrom rl.callbacks import history\n\nwidth, height = 7, 10\nnb_frames = 1\n\ngame = tromis.Tromis(width, height, max_turn=512)\n\ninp = keras.layers.Input(shape=(nb_frames, height, width, 3))\nx = keras.layers.Conv3D(32,5,padding='same',strides=1,activation='relu')(inp)\nx = keras.layers.AveragePooling3D(padding='same')(x)\nx = keras.layers.Conv3D(64,3,padding='same',strides=1,activation='relu')(x)\nx = keras.layers.GlobalAveragePooling3D()(x)\nx = keras.layers.Dense(128, activation='relu')(x)\nact = keras.layers.Dense(game.nb_actions, activation='linear')(x)\n\nmodel = keras.models.Model(inputs=inp, outputs=act)\nmodel.compile(keras.optimizers.RMSprop(), keras.losses.LogCosh())\nmodel.summary()\n\nparams = {\n 'batch_size': 256,\n 'epochs': 200,\n 'episodes': 100,\n 'train_freq': 32,\n 'target_sync': 512,\n 'epsilon_start': 0.5,\n 'epsilon_decay': 0.75,\n 'epsilon_final': 0.0,\n 'gamma': 0.92,\n 'reset_memory': False,\n 'observe': 100\n}\n\nrlparams = {\n 'rl.memory': 'UniqMemory',\n 'rl.memory_size': 65536,\n 'rl.optimizer': 'RMSprop',\n 'rl.with_target': True,\n 'rl.nb_frames': nb_frames\n}\n\ngameparams = {\n 'game.width': game.width,\n 'game.height': game.height,\n 'game.max_turn': game.max_turn\n}\n\nmemory = uniqmemory.UniqMemory(memory_size=rlparams['rl.memory_size'])\nagent = dqn.Agent(model, memory, with_target=rlparams['rl.with_target'])\n#history = history.HistoryLog(\"tromis\", {**params, **rlparams, **gameparams})\n\nagent.train(game, verbose=1, callbacks=[], **params)\n",
"#!/usr/bin/env python3\n\n# Policy algorithm by https://github.com/DeepReinforcementLearning/DeepReinforcementLearningInAction\n# Distributed there under the MIT license\n\nimport catch\nimport numpy as np\nimport keras\n\ngrid_size = 10\nl1 = grid_size*grid_size*3\nl2 = 150\nl3 = 3\nlearning_rate = 0.001\n\ndef generate_model():\n input_state = keras.layers.Input(shape=(l1,), name=\"Input_State\")\n x = keras.layers.Dense(l2)(input_state)\n x = keras.layers.LeakyReLU()(x)\n actions = keras.layers.Dense(l3, activation='softmax')(x)\n\n def loss_fn(y_true, y_pred):\n return -1.0 * keras.backend.sum(y_true * keras.backend.log(y_pred))\n\n model = keras.models.Model(inputs=input_state, outputs=actions)\n model.compile(loss=loss_fn, optimizer=keras.optimizers.RMSprop(learning_rate))\n\n return model\n\nmodel = generate_model()\nmodel.summary()\n\nMAX_DUR = 20\nMAX_EPISODES = 10000\ngamma_ = 0.95\ntime_steps = []\n\nenv = catch.Catch(grid_size=grid_size)\n\nwin_stats = []\nloss_stats = []\n\nfor episode in range(MAX_EPISODES):\n env.reset()\n curr_state = env.get_state().flatten()\n done = False\n transitions = [] # list of state, action, rewards\n \n for t in range(MAX_DUR): #while in episode\n act_prob = model.predict(np.expand_dims(np.asarray(curr_state), axis=0))\n action = np.random.choice(np.array([0,1,2]), p=act_prob[0])\n prev_state = curr_state\n curr_state, reward, done = env.play(action)\n curr_state = curr_state.flatten()\n transitions.append((prev_state, action, reward))\n if done:\n win_stats.append(1 if reward == 1.0 else 0)\n break\n\n # Optimize policy network with full episode\n ep_len = len(transitions) # episode length\n discounted_rewards = np.zeros((ep_len, l3))\n train_states = []\n for i in range(ep_len): #for each step in episode\n discount = 1.0\n future_reward = 0.0\n # discount rewards\n for i2 in range(i, ep_len):\n future_reward += transitions[i2][2] * discount\n discount = discount * gamma_\n discounted_rewards[i][transitions[i][1]] = 
future_reward\n train_states.append(transitions[i][0])\n train_states = np.asarray(train_states)\n # Backpropagate model with preds & discounted_rewards here\n loss = model.train_on_batch(train_states, discounted_rewards)\n loss_stats.append(loss)\n \n if len(win_stats) >= 100:\n print(\"Episode {: 4d} Win perc {:2.4f} Loss {:2.4f}\".format(episode, sum(win_stats)/100.0, sum(loss_stats)/100.0))\n win_stats = []\n loss_stats = []\n \n"
] |
[
[
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.GlobalAveragePooling3D",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.optimizers.RMSprop",
"tensorflow.keras.layers.Conv3D",
"tensorflow.keras.losses.LogCosh",
"tensorflow.get_logger",
"tensorflow.keras.layers.AveragePooling3D",
"tensorflow.keras.layers.Input"
],
[
"numpy.asarray",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"2.7",
"2.2",
"2.3",
"2.4",
"2.5",
"2.6"
]
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
sebszyller/counterfit
|
[
"e3075b76e3616f2cf2e7767152d1be4852b4a213",
"e3075b76e3616f2cf2e7767152d1be4852b4a213"
] |
[
"counterfit/frameworks/textattack/textattack.py",
"counterfit/targets/movie_reviews/movie_reviews.py"
] |
[
"import re\nimport importlib\n\nfrom textattack import Attacker\nimport numpy as np\nfrom textattack.datasets import Dataset\n\nfrom counterfit.core.attacks import CFAttack\nfrom counterfit.core.frameworks import Framework\nfrom counterfit.core.utils import get_subclasses\nfrom counterfit.report.report_generator import get_target_data_type_obj\n\nclass TextAttackFramework(Framework):\n def __init__(self):\n super().__init__()\n\n def load(self, config_path=None):\n if config_path:\n self.load_from_config(config_path)\n else:\n self.load_attacks()\n self.loaded_status = True\n\n def load_attacks(self):\n base_import = importlib.import_module(\n \"textattack.attack_recipes\")\n attacks = get_subclasses(base_import.AttackRecipe)\n\n for attack_class in attacks:\n attack_name = re.findall(\n r\"\\w+\", str(attack_class).split(\".\")[-1].strip())[0]\n attack_category = \"BlackBox\"\n attack_type = \"IntegrityAttack\" if \"Seq\" in attack_name else \"EvasionAttack\"\n attack_data_tags = [\"text\"]\n attack_params = {}\n\n # Create the Attack Object\n if attack_name not in self.attacks.keys():\n self.add_attack(\n attack_name=attack_name,\n attack_class=attack_class,\n attack_type=attack_type,\n attack_category=attack_category,\n attack_data_tags=attack_data_tags,\n attack_default_params=attack_params\n )\n\n def create_dataset(self, cfattack):\n # return Dataset([(\"This is a test\", 1)])\n return Dataset(list(zip(cfattack.samples, cfattack.initial_labels)))\n\n def build(self, target, attack):\n\n class TextAttackWrapperObject(object):\n def __init__(self, predict_wrapper):\n self.model = predict_wrapper\n\n def __call__(self, x):\n return self.model(x)\n\n new_attack = attack.build(\n TextAttackWrapperObject(target.predict_wrapper))\n return new_attack\n\n def run(self, cfattack):\n\n # get labels for samples and zip with samples\n dataset = self.create_dataset(cfattack)\n\n new_attack = Attacker(cfattack.attack, dataset)\n results = new_attack.attack_dataset()\n 
return [r.perturbed_text() for r in results]\n\n def post_attack_processing(self, cfattack: CFAttack):\n\n current_datatype = cfattack.target.target_data_type\n current_dt_report_gen = get_target_data_type_obj(current_datatype)\n summary = current_dt_report_gen.get_run_summary(cfattack)\n current_dt_report_gen.print_run_summary(summary)\n\n def check_success(self, cfattack: CFAttack) -> bool:\n final_outputs, final_labels = cfattack.target.get_sample_labels(\n cfattack.results)\n new_labels = np.atleast_1d(final_labels)\n old_labels = np.atleast_1d(cfattack.initial_labels)\n\n cfattack.final_labels = final_labels\n cfattack.final_outputs = final_outputs\n\n success_arr = new_labels != np.array(old_labels)\n return success_arr\n",
"import pickle\nimport re\nimport copy\n\nimport numpy as np\nimport pandas as pd\nfrom torch import nn\nimport torch\n\nfrom counterfit.core.targets import Target\n\nclass MovieReviewsTarget(Target):\n \"\"\"Defining movie reviews target which is responsible for predicting the scores for a given input and convert scores to labels.\n \"\"\"\n target_data_type = \"text\"\n target_name = \"movie_reviews\"\n target_endpoint = f\"movie_reviews_sentiment_analysis.pt\"\n target_input_shape = (1,)\n target_output_classes = [0, 1] # textattack requires these to be integers\n target_classifier = \"BlackBox\"\n\n sample_input_path = f\"movie-reviews-scores-full.csv\"\n vocab_file = f\"movie-reviews-vocab.pkl\"\n X = []\n\n def load(self):\n \"\"\"[summary]\n \"\"\"\n self.data = pd.read_csv(self.fullpath(self.sample_input_path))\n print(f\"\\n[+] Total Movie Reviews: {len(self.data)}\\n\")\n self._load_x()\n self.vocab = self._load_vocab()\n self.model = self._load_model()\n\n def _load_x(self):\n \"\"\"[summary]\n \"\"\"\n # Append input reviews to X list\n for idx in range(len(self.data)):\n self.X.append(self.data['review'][idx])\n\n def _load_vocab(self):\n \"\"\"[summary]\n\n Returns:\n [type]: [description]\n \"\"\"\n # Load vocabulary file; 1000 most occurence words\n with open(self.fullpath(self.vocab_file), 'rb') as fp:\n vocab = pickle.load(fp)\n return vocab\n\n def preprocess_string(self, s):\n \"\"\"[summary]\n\n Args:\n s ([type]): [description]\n\n Returns:\n [type]: [description]\n \"\"\"\n # Remove all non-word characters (everything except numbers and letters)\n s = re.sub(r\"[^\\w\\s]\", '', s)\n # Replace all runs of whitespaces with no space\n s = re.sub(r\"\\s+\", '', s)\n # replace digits with no space\n s = re.sub(r\"\\d\", '', s)\n\n return s\n\n def _load_model(self):\n \"\"\"[summary]\n\n Returns:\n [type]: [description]\n \"\"\"\n # Load the LST model that's already trained\n no_layers = 2\n vocab_size = len(self.vocab) + 1 # extra 1 for padding 
purpose\n embedding_dim = 64\n output_dim = 1\n hidden_dim = 256\n model = MovieReviewsSentimentLSTM(\n no_layers, vocab_size, hidden_dim, embedding_dim, output_dim, drop_prob=0.5)\n model.load_state_dict(copy.deepcopy(\n torch.load(self.fullpath(self.target_endpoint), 'cpu')))\n model.eval()\n return model\n\n def padding_(self, sentences, seq_len):\n # Padding with zeros if sentence is less than required seq length\n features = np.zeros((len(sentences), seq_len), dtype=int)\n for ii, review in enumerate(sentences):\n if len(review) != 0:\n features[ii, -len(review):] = np.array(review)[:seq_len]\n return features\n\n def predict(self, x):\n \"\"\"This function takes list of input texts. For example., [\"how are you?\"]\n\n Args:\n x (list): [input_text]\n\n Returns:\n final_prob_scores: [[0.98, 0.02]] 0.98 probability score represents the sentence tone is positive and 0.02 score represents \n \"\"\"\n final_prob_scores = []\n for text in x:\n word_seq = np.array([self.vocab[self.preprocess_string(word)] for word in text.split()\n if self.preprocess_string(word) in self.vocab.keys()])\n word_seq = np.expand_dims(word_seq, axis=0)\n pad = torch.from_numpy(self.padding_(word_seq, 500))\n inputs = pad.to('cpu')\n batch_size = 1\n h = self.model.init_hidden(batch_size)\n h = tuple([each.data for each in h])\n output, h = self.model(inputs, h)\n probability = output.item()\n final_prob_scores.append([probability, 1.0-probability])\n return final_prob_scores # this must produce a list of class probabilities\n\nclass MovieReviewsSentimentLSTM(nn.Module):\n \"\"\"pre-trained LSTM model on 25 epochs for building sentiment analysis model on IMDB movies review dataset. 
\n \"\"\"\n\n def __init__(self, no_layers, vocab_size, hidden_dim, embedding_dim, output_dim, drop_prob=0.5):\n # embedding_dim: number of expected features in the input `x`\n # hidden_dim: number of features in the hidden state `h`\n\n super(MovieReviewsSentimentLSTM, self).__init__()\n\n self.no_layers = no_layers # number of recurrent layers\n self.vocab_size = vocab_size\n self.hidden_dim = hidden_dim # The number of features in the hidden state h\n # embedding and LSTM layers\n self.embedding = nn.Embedding(vocab_size, embedding_dim)\n self.proj_size = 0\n self.output_dim = output_dim # The size of the output you desire from your RNN\n # dropout layer\n # a Dropout layer on the outputs of each LSTM layer except the last layer, with dropout probability equal to dropout\n self.dropout = nn.Dropout(drop_prob)\n\n # lstm\n self.lstm = nn.LSTM(input_size=embedding_dim, hidden_size=self.hidden_dim,\n num_layers=no_layers, batch_first=True, proj_size=self.proj_size)\n\n # final fully connected linear and sigmoid layer\n self.fc = nn.Linear(self.hidden_dim, self.output_dim)\n self.sig = nn.Sigmoid()\n\n def forward(self, x, hidden):\n \"\"\"Forward process of LSTM model\n\n Args:\n x ([tensor]): training data/batch_first\n\n\n Returns:\n Last sigmoid output and hidden state\n \"\"\"\n batch_size = x.size(0)\n # embeddings and lstm_out\n # shape: Batch x Sequence x Feature since batch_first = True\n embeds = self.embedding(x)\n lstm_out, hidden = self.lstm(embeds, hidden)\n\n lstm_out = lstm_out.contiguous().view(-1, self.hidden_dim)\n\n # dropout and fully connected layer\n out = self.dropout(lstm_out)\n out = self.fc(out)\n\n # sigmoid function\n sig_out = self.sig(out)\n\n # reshape to be batch_size first\n sig_out = sig_out.view(batch_size, -1)\n\n sig_out = sig_out[:, -1] # get last batch of labels\n\n return sig_out, hidden\n\n def init_hidden(self, batch_size, device='cpu'):\n # initialize hidden weights (h,c) to 0\n weights = next(self.parameters()).data\n h = 
(weights.new(self.no_layers, batch_size, self.hidden_dim).zero_().to(device),\n weights.new(self.no_layers, batch_size, self.hidden_dim).zero_().to(device))\n\n return h\n\n\n"
] |
[
[
"numpy.atleast_1d",
"numpy.array"
],
[
"torch.nn.Dropout",
"numpy.expand_dims",
"torch.nn.LSTM",
"torch.nn.Embedding",
"torch.nn.Sigmoid",
"torch.nn.Linear",
"numpy.array"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Ahanio/nucleoli_segment
|
[
"11a63117c83e607cf9752007e6ccc8766d01ca9e"
] |
[
"inference_example.py"
] |
[
"from all_in_one import AllInOneModel\nimport os\nfrom skimage.io import imsave\nimport torch\nimport numpy as np\nfrom imageio import volread\n\npaths = [\n \"./data_example/e-0479_c-1_siRNA-11_pos-10_RI.tiff\",\n \"./data_example/e-0479-c-61-untreated-test_RI.tiff\",\n]\n\nimg = torch.cat([torch.Tensor([volread(path)]).float() for path in paths], dim=0)\nfocus_frame_idxs = 41\n\nmodel = AllInOneModel(focus_frame_idx=focus_frame_idxs)\nmodel.load_state_dict(torch.load(\"./weights/nucleoli_weights.tar\", map_location=torch.device('cpu')))\n\npred = model(img).data.cpu().numpy()\n\nimsave(f\"./results/test1.png\", pred[0, 0].astype(\"uint8\") * 255)\nimsave(f\"./results/test2.png\", pred[1, 0].astype(\"uint8\") * 255)\n"
] |
[
[
"torch.device"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
Yaodong1208/adv
|
[
"0306bf658c95df9dede67991fc79c29e887ee128"
] |
[
"stAdv/tests/test_optimization.py"
] |
[
"from .context import stadv, call_assert\n\nimport tensorflow as tf\nimport numpy as np\n\n\nclass LBFGSCase(tf.test.TestCase):\n \"\"\"Test the lbfgs optimization function.\n Note: we are NOT testing the LBFGS implementation from SciPy, instead we\n test our wrapping and its interplay with TensorFlow.\"\"\"\n\n def setUp(self):\n self.example_flow = np.array([[0.5, 0.4], [-0.2, 0.7]])\n self.flows = tf.Variable(self.example_flow, name='flows')\n self.loss_l2 = tf.reduce_sum(tf.square(self.flows), name='loss_l2')\n self.loss_dummy = tf.Variable(1.4, name='loss_dummy')\n\n tf.global_variables_initializer()\n\n def test_l2_norm_loss(self):\n \"\"\"Check that simple L2 loss leads to 0 loss and gradient in the end.\"\"\"\n results = stadv.optimization.lbfgs(\n self.loss_l2, self.flows, flows_x0=self.example_flow\n )\n call_assert(\n self.assertEqual,\n results['flows'].shape, self.example_flow.shape,\n msg='initial and optimized flows have a different shape'\n )\n call_assert(\n self.assertAllClose,\n results['flows'], np.zeros(results['flows'].shape),\n msg='optimized flows significantly differ from 0'\n )\n call_assert(\n self.assertAllClose,\n results['loss'], np.zeros(results['loss'].shape),\n msg='final gradients significantly differ from 0'\n )\n\n def test_dummy_loss(self):\n \"\"\"Make sure a dummy loss (no computable gradient) gives an error.\"\"\"\n with self.assertRaises(ValueError):\n stadv.optimization.lbfgs(\n self.loss_dummy, self.flows, flows_x0=self.example_flow\n )\n\n def test_overwriting_optimized_function(self):\n \"\"\"Make sure we cannot overwrite argument defining the function to\n optimize.\"\"\"\n with self.assertRaises(ValueError):\n stadv.optimization.lbfgs(\n self.loss_dummy, self.flows, flows_x0=self.example_flow,\n fmin_l_bfgs_b_extra_kwargs={'func': np.sqrt}\n )\n\nif __name__ == '__main__':\n tf.test.main()\n"
] |
[
[
"tensorflow.Variable",
"tensorflow.test.main",
"tensorflow.global_variables_initializer",
"tensorflow.square",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": [
"1.10",
"1.12",
"1.4",
"1.13",
"1.5",
"1.7",
"0.12",
"1.0",
"1.2"
]
}
] |
sandmaennchen/acados
|
[
"3119e2dda636a8358fbd52247eb0163a167cbc97"
] |
[
"examples/acados_python/getting_started/ocp/example_gnsf_ocp.py"
] |
[
"#\n# Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren,\n# Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor,\n# Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan,\n# Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl\n#\n# This file is part of acados.\n#\n# The 2-Clause BSD License\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# 1. Redistributions of source code must retain the above copyright notice,\n# this list of conditions and the following disclaimer.\n#\n# 2. Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE\n# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR\n# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF\n# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS\n# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN\n# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)\n# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.;\n#\n\nimport sys\nsys.path.insert(0, '../common')\n\nimport json\nfrom acados_template import AcadosOcp, AcadosOcpSolver\nfrom export_pendulum_ode_model import export_pendulum_ode_model\nimport numpy as np\nimport scipy.linalg\nfrom utils import plot_pendulum\n\n# create ocp object to formulate the OCP\nocp = AcadosOcp()\n\n# set model\nmodel = export_pendulum_ode_model()\nocp.model = model\n\n# load gnsf model\n# NOTE: generated from Matlab, using simulation example of pendulum model with irk_gnsf\n# then >> dump_gnsf_functions(sim.model_struct)\nwith open('../common/' + model.name + '_gnsf_functions.json', 'r') as f:\n gnsf_dict = json.load(f)\nocp.gnsf_model = gnsf_dict\n\nTf = 1.0\nnx = model.x.size()[0]\nnu = model.u.size()[0]\nny = nx + nu\nny_e = nx\nN = 20\n\n# set dimensions\nocp.dims.ny = ny\nocp.dims.ny_e = ny_e\nocp.dims.nbu = nu\nocp.dims.N = N\n\n# set cost\nQ = 2*np.diag([1e3, 1e3, 1e-2, 1e-2])\nR = 2*np.diag([1e-2])\n\nocp.cost.W_e = Q\nocp.cost.W = scipy.linalg.block_diag(Q, R)\n\nocp.cost.Vx = np.zeros((ny, nx))\nocp.cost.Vx[:nx,:nx] = np.eye(nx)\n\nVu = np.zeros((ny, nu))\nVu[4,0] = 1.0\nocp.cost.Vu = Vu\n\nocp.cost.Vx_e = np.eye(nx)\n\nocp.cost.yref = np.zeros((ny, ))\nocp.cost.yref_e = np.zeros((ny_e, ))\n\n# set constraints\nFmax = 80\nocp.constraints.lbu = np.array([-Fmax])\nocp.constraints.ubu = np.array([+Fmax])\nocp.constraints.x0 = np.array([0.0, np.pi, 0.0, 0.0])\nocp.constraints.idxbu = 
np.array([0])\n\nocp.solver_options.qp_solver = 'PARTIAL_CONDENSING_HPIPM' # FULL_CONDENSING_QPOASES\nocp.solver_options.hessian_approx = 'GAUSS_NEWTON'\nocp.solver_options.integrator_type = 'GNSF'\nocp.solver_options.print_level = 0\n\n# set prediction horizon\nocp.solver_options.tf = Tf\nocp.solver_options.nlp_solver_type = 'SQP' # SQP_RTI\n\nocp_solver = AcadosOcpSolver(ocp, json_file = 'acados_ocp.json')\n\nsimX = np.ndarray((N+1, nx))\nsimU = np.ndarray((N, nu))\n\nstatus = ocp_solver.solve()\n\nif status != 0:\n raise Exception('acados returned status {}. Exiting.'.format(status))\n\n# get solution\nfor i in range(N):\n simX[i,:] = ocp_solver.get(i, \"x\")\n simU[i,:] = ocp_solver.get(i, \"u\")\nsimX[N,:] = ocp_solver.get(N, \"x\")\n\nplot_pendulum(np.linspace(0, Tf, N+1), Fmax, simU, simX)\n"
] |
[
[
"numpy.diag",
"numpy.linspace",
"numpy.eye",
"numpy.ndarray",
"numpy.array",
"numpy.zeros"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
}
] |
slamavl/QChemTool
|
[
"b6b17adf6cfa8ac1db47acba93aab1ee49c1be47",
"b6b17adf6cfa8ac1db47acba93aab1ee49c1be47"
] |
[
"tests/Polarizability/Derivative/Derivative_elstat.py",
"tests/MD/Normal_Mode/FGrph_NMA.py"
] |
[
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jan 5 17:09:34 2018\r\n\r\n@author: Vladislav Sláma\r\n\"\"\"\r\n\r\nimport numpy as np\r\nfrom QChemTool.Polarizable_atoms.Electrostatics_module import Electrostatics\r\n\r\n'''----------------------- TEST PART --------------------------------'''\r\nif __name__==\"__main__\":\r\n\r\n print(' TESTS')\r\n print('-----------------------------------------') \r\n \r\n ''' Test derivation of energy d/dR ApB '''\r\n coor = [[0.0,0.0,0.0],[0.0,0.0,1.0],[3.0,0.0,0.0],[3.0,0.0,1.0]]\r\n coor = np.array(coor,dtype='f8')\r\n charge = np.ones(4,dtype='f8')\r\n at_type = ['CD','CD','CF','CF']\r\n Mol_elstat = Electrostatics(coor,charge,at_type)\r\n Eshift = Mol_elstat.get_EnergyShift()\r\n Eshift2, derivative = Mol_elstat.get_EnergyShift_and_Derivative()\r\n \r\n print(Eshift,Eshift2,Eshift2-Eshift)\r\n \r\n for kk in range(8):\r\n dr = 1/10**kk\r\n Mol_elstat_dr=np.zeros(12)\r\n for ii in range(3):\r\n for jj in range(4):\r\n coor_tmp = coor.copy()\r\n coor_tmp[jj,ii] += dr\r\n Mol_elstat_tmp = Electrostatics(coor_tmp,charge,at_type)\r\n Mol_elstat_dr[jj*3+ii] = Mol_elstat_tmp.get_EnergyShift()\r\n \r\n Mol_elstat_dr = (Mol_elstat_dr - Eshift)/dr\r\n \r\n suma = np.sum(np.abs(Mol_elstat_dr-derivative))\r\n print('dr=',dr,' and sum=',suma)\r\n \r\n print(derivative)",
"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Wed Jan 17 16:27:58 2018\r\n\r\n@author: Vladislav Sláma\r\n\"\"\"\r\nimport numpy as np\r\nfrom scipy.optimize import minimize\r\n\r\nfrom QChemTool.General.units import conversion_facs_position as conv_pos\r\nfrom QChemTool.QuantumChem.Classes.structure import Structure\r\nfrom QChemTool.QuantumChem.Fluorographene.fluorographene import get_AMBER_MD_normal_modes,get_border_carbons_FG\r\n\r\nglobal frcmod_filename,struc,state,FF_param\r\n\r\noptimize = True\r\ncompare_w_gauss = True\r\ncharges = 'Hirshfeld'\r\nstate='Ground'\r\nFF_param = {'equilibrium': {}, 'force': {}}\r\n\r\n# Parameters obtained from fitting geometry\r\nFF_param['equilibrium']['c3-c3'] = 1.50398 #1.5350\r\nFF_param['equilibrium']['cb-c3'] = 1.58027 #1.5350\r\nFF_param['equilibrium']['cb-cb'] = 1.58349 #1.5350\r\nFF_param['equilibrium']['c3-f'] = 1.45975\r\nFF_param['equilibrium']['c3-cb-c3'] = 99.781\r\nFF_param['equilibrium']['cb-cb-c3'] = 93.821\r\nFF_param['equilibrium']['c3-c3-c3'] = 112.575\r\n\r\n# parameters obtained from normal mode calculation\r\nFF_param['force']['c3-c3'] = 303.1 # 303.1\r\nFF_param['force']['cb-c3'] = 303.1 # 303.1\r\nFF_param['force']['cb-cb'] = 303.1 # 303.1\r\nFF_param['force']['c3-f'] = 363.8 # 363.8\r\nFF_param['force']['cb-fb'] = FF_param['force']['c3-f']\r\nFF_param['force']['c3-c3-c3'] = 63.21 # 63.21\r\nFF_param['force']['cb-c3-c3'] = 63.21 # 63.21\r\nFF_param['force']['c3-c3-f'] = 66.22 # 63.21\r\nFF_param['force']['cb-cb-fb'] = 66.22 # 63.21\r\nFF_param['force']['c3-cb-fb'] = FF_param['force']['cb-cb-fb']\r\n\r\n# 'cb-cb-c3': [63.21,110.63],\r\n# 'cb-c3-cb': [63.21,110.63], 'c3-cb-c3': [63.21,110.63],\r\n# 'cb-cb-cb': [63.21,110.63], 'cb-cb-fb': [66.22,109.41],\r\n# 'cb-c3-f': [66.22,109.41], 'c3-cb-fb': [66.22,109.41], \r\n# 'fb-cb-fb': [71.260,120.0], 'c3-c3-f': [66.22,109.41],\r\n# 'f-c3-f': [71.260,210.0]\r\n\r\n# Set FG charges\r\nif charges == 'Hirshfeld':\r\n FG_charges = [0.08125,0.08125]\r\n\r\nif 
compare_w_gauss or optimize:\r\n # read normal mode information from gaussian freq calculation\r\n log_filename = \"/mnt/sda2/PhD/Ab-initio-META/Fluorographane/Freq/FGrph-5hex_opt_freqHP_reord.log\"\r\n fchk_filename = \"/mnt/sda2/PhD/Ab-initio-META/Fluorographane/Freq/FGrph-5hex_opt_freqHP_reord.fchk\"\r\n \r\n from QChemTool.QuantumChem.Classes.molecule import Molecule\r\n mol_gauss = Molecule(\"Frequency calculation\")\r\n mol_gauss.load_Gaussian_fchk(fchk_filename)\r\n mol_gauss.load_Gaussian_log(log_filename)\r\n freq_gauss = mol_gauss.vib_spec['Frequency']\r\n\r\n# Load initial structure\r\nstruc = Structure()\r\nstruc.load_xyz(\"FGrph-5hex_opt_freqHP_reord.xyz\")\r\n\r\n# assign charges\r\nborder_C_indx,border_F_indx = get_border_carbons_FG(struc)\r\nif state=='Ground':\r\n struc.esp_grnd = np.zeros(struc.nat,dtype='f8')\r\n charges = struc.esp_grnd\r\nelif state=='Excited':\r\n struc.esp_exct = np.zeros(struc.nat,dtype='f8')\r\n charges = struc.esp_exct\r\nelif state=='Transition':\r\n struc.esp_trans = np.zeros(struc.nat,dtype='f8')\r\n charges = struc.esp_trans\r\n \r\nfor ii in range(struc.nat):\r\n if struc.at_type[ii] == 'C':\r\n charges[ii] = FG_charges[0]\r\n elif struc.at_type[ii] == 'F':\r\n charges[ii] = -FG_charges[0]\r\n else:\r\n raise Warning(\"Unknown atom type in structure\")\r\ncharges[border_C_indx] = 2*FG_charges[1]\r\ncharges[border_F_indx] = -FG_charges[1]\r\n\r\n\r\nNM_info = get_AMBER_MD_normal_modes(struc,state=state,gen_input=True,**FF_param)\r\n\r\ndef compare_NM(param):\r\n print(param)\r\n FF_param['force']['c3-c3'] = param[0]\r\n FF_param['force']['cb-c3'] = param[1]\r\n FF_param['force']['cb-cb'] = param[2]\r\n FF_param['force']['c3-f'] = param[3]\r\n FF_param['force']['cb-fb'] = FF_param['force']['c3-f']\r\n FF_param['force']['c3-c3-c3'] = param[4]\r\n FF_param['force']['cb-c3-c3'] = FF_param['force']['c3-c3-c3']\r\n FF_param['force']['c3-cb-c3'] = FF_param['force']['c3-c3-c3']\r\n FF_param['force']['cb-cb-c3'] = 
FF_param['force']['c3-c3-c3']\r\n FF_param['force']['cb-c3-cb'] = FF_param['force']['c3-c3-c3']\r\n FF_param['force']['cb-cb-cb'] = FF_param['force']['c3-c3-c3']\r\n FF_param['force']['c3-c3-f'] = param[5]\r\n FF_param['force']['cb-cb-fb'] = FF_param['force']['c3-c3-f']\r\n FF_param['force']['c3-cb-fb'] = FF_param['force']['cb-cb-fb']\r\n print(FF_param)\r\n NM_info = get_AMBER_MD_normal_modes(struc,state=state,gen_input=False,**FF_param)\r\n \r\n diff = np.sum(np.abs( freq_gauss - NM_info['freq'] )/ freq_gauss)\r\n print('\\n')\r\n print(diff)\r\n print('\\n')\r\n return diff\r\n\r\nif optimize:\r\n min_method='SLSQP'\r\n options={'eps': 0.1} \r\n res = minimize(compare_NM,(FF_param['force']['c3-c3'],FF_param['force']['cb-c3'],FF_param['force']['cb-cb'],FF_param['force']['c3-f'],FF_param['force']['c3-c3-c3'],FF_param['force']['c3-c3-f']),method=min_method,options=options)\r\n print(res)\r\n\r\n NM_info = get_AMBER_MD_normal_modes(struc,state=state,gen_input=True,**FF_param)\r\n\r\n# plot histogram \r\nimport matplotlib.pyplot as plt\r\nstep = 50\r\nbins = np.arange(0,max(NM_info['freq'][-1],freq_gauss[-1]),50.0)\r\nplt.hist(NM_info['freq'], alpha=0.5, normed=False, bins=bins, label='AMBER MD')\r\nif compare_w_gauss or optimize:\r\n plt.hist(freq_gauss, alpha=0.5, normed=False, bins=bins, label='Gaussian09')\r\nplt.xlabel('Frequency');\r\nplt.xlabel('Count');\r\nplt.show()\r\n\r\n\r\n# NM_info[\"int2cart\"] = InternalToCartesian\r\n# NM_info[\"cart2int\"] = CartesianToInternal\r\n# NM_info[\"freq\"] = Freqcm1\r\n# NM_info[\"RedMass\"] = RedMass\r\n# NM_info['force'] = ForcesCm1Agstrom2\r\n# NM_info['units'] = {\"freq\": \"1/cm\", \"RedMass\": \"AMU(atomic mass units)\",\r\n# \"force\": \"1/(cm * Angstrom^2)\", \"int2cart\": \"dimensionles\",\r\n# 'cart2int': \"dimensionles\"}\r\n# {'equilibrium': {'cb-cb-c3': 93.821, 'c3-c3-c3': 112.575, 'c3-cb-c3': 99.781, 'c3-f': 1.45975, 'c3-c3': 1.50398, 'cb-cb': 1.58349, 'cb-c3': 1.58027}, 'force': {'cb-cb-c3': 
46.161964772446098, 'c3-cb-c3': 46.161964772446098, 'cb-cb-cb': 46.161964772446098, 'c3-c3-f': 38.338517296715168, 'c3-c3-c3': 46.161964772446098, 'c3-c3': 149.92644957647627, 'cb-cb-fb': 38.338517296715168, 'c3-f': 281.97958014222456, 'cb-c3-c3': 46.161964772446098, 'cb-fb': 281.97958014222456, 'c3-cb-fb': 38.338517296715168, 'cb-cb': 281.64140482308568, 'cb-c3-cb': 46.161964772446098, 'cb-c3': 133.4506671439552}}\r\n"
] |
[
[
"numpy.array",
"numpy.zeros",
"numpy.abs",
"numpy.ones"
],
[
"numpy.abs",
"scipy.optimize.minimize",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"numpy.zeros",
"matplotlib.pyplot.hist"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [],
"tensorflow": []
},
{
"matplotlib": [],
"numpy": [],
"pandas": [],
"scipy": [
"0.13",
"1.6",
"0.14",
"1.10",
"0.15",
"1.4",
"0.16",
"1.9",
"0.19",
"1.5",
"0.18",
"1.2",
"1.7",
"0.12",
"1.0",
"0.17",
"1.3",
"1.8"
],
"tensorflow": []
}
] |
jairoruizsaenz/scattertext
|
[
"5d96f62434057cc26ed90a1d0b314984e4ef90f8"
] |
[
"scattertext/TermDocMatrix.py"
] |
[
"import warnings\nfrom copy import copy\n\nimport numpy as np\nimport pandas as pd\nimport scipy\nfrom pandas.core.common import SettingWithCopyWarning\nfrom scipy.sparse import csr_matrix\nfrom scipy.stats import hmean, fisher_exact, rankdata, norm\nfrom sklearn.feature_extraction.text import TfidfTransformer\nfrom sklearn.linear_model import ElasticNet\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import RidgeClassifierCV, LassoCV\n\nfrom scattertext.CSRMatrixTools import delete_columns, CSRMatrixFactory\nfrom scattertext.Common import DEFAULT_BETA, DEFAULT_SCALER_ALGO\nfrom scattertext.TermDocMatrixWithoutCategories import TermDocMatrixWithoutCategories\nfrom scattertext.indexstore import IndexStore, IndexStoreFromList\nfrom scattertext.termscoring.CornerScore import CornerScore\n\nwarnings.simplefilter(action=\"ignore\", category=SettingWithCopyWarning)\n\nfrom scattertext.termscoring.ScaledFScore import InvalidScalerException, ScaledFScore\n\n\nclass CannotCreateATermDocMatrixWithASignleCategoryException(Exception):\n pass\n\n\nclass TermDocMatrix(TermDocMatrixWithoutCategories):\n '''\n !!! 
to do: refactor score functions into classes\n '''\n\n def __init__(self,\n X,\n mX,\n y,\n term_idx_store,\n category_idx_store,\n metadata_idx_store,\n unigram_frequency_path=None):\n '''\n\n Parameters\n ----------\n X : csr_matrix\n term document matrix\n mX : csr_matrix\n metadata-document matrix\n y : np.array\n category index array\n term_idx_store : IndexStore\n Term indices\n category_idx_store : IndexStore\n Catgory indices\n metadata_idx : IndexStore\n Document metadata indices\n unigram_frequency_path : str or None\n Path to term frequency file.\n '''\n TermDocMatrixWithoutCategories.__init__(self, X=X, mX=mX, term_idx_store=term_idx_store,\n metadata_idx_store=metadata_idx_store,\n unigram_frequency_path=unigram_frequency_path)\n self._y = y\n self._category_idx_store = category_idx_store\n\n def get_categories(self):\n '''\n Returns\n -------\n list\n Category names\n '''\n return self._category_idx_store.values()\n\n def old_get_term_freq_df(self):\n d = {'term': self._term_idx_store._i2val}\n for i, category in self._category_idx_store.items():\n d[category + ' freq'] = self._X[self._y == i].sum(axis=0).A1\n return pd.DataFrame(d).set_index('term')\n\n def get_term_freq_df(self, label_append=' freq'):\n '''\n Parameters\n -------\n label_append : str\n\n Returns\n -------\n pd.DataFrame indexed on terms, with columns giving frequencies for each\n '''\n\n '''\n row = self._row_category_ids()\n newX = csr_matrix((self._X.data, (row, self._X.indices)))\n return self._term_freq_df_from_matrix(newX)\n '''\n mat = self.get_term_freq_mat()\n return pd.DataFrame(mat,\n index=pd.Series(self.get_terms(), name='term'),\n columns=[str(c) + label_append for c in self.get_categories()])\n\n def get_term_freq_mat(self):\n '''\n Returns\n -------\n np.array with columns as categories and rows as terms\n '''\n freq_mat = np.zeros(shape=(self.get_num_terms(), self.get_num_categories()),\n dtype=self.get_term_doc_mat().dtype)\n for cat_i in 
range(self.get_num_categories()):\n freq_mat[:, cat_i] = self._X[self._y == cat_i, :].sum(axis=0)\n return freq_mat\n\n def get_term_count_mat(self):\n '''\n Returns\n -------\n np.array with columns as categories and rows as terms\n '''\n freq_mat = np.zeros(shape=(self.get_num_terms(), self.get_num_categories()),\n dtype=self.get_term_doc_mat().dtype)\n for cat_i in range(self.get_num_categories()):\n X = (self._X[self._y == cat_i, :] > 0).astype(int)\n freq_mat[:, cat_i] = X.sum(axis=0)\n return freq_mat\n\n def get_metadata_count_mat(self):\n '''\n Returns\n -------\n np.array with columns as categories and rows as terms\n '''\n freq_mat = np.zeros(shape=(self.get_num_metadata(), self.get_num_categories()),\n dtype=self.get_metadata_doc_mat().dtype)\n for cat_i in range(self.get_num_categories()):\n mX = (self._mX[self._y == cat_i, :] > 0).astype(int)\n freq_mat[:, cat_i] = mX.sum(axis=0)\n return freq_mat\n\n def get_term_doc_count_df(self, label_append=' freq'):\n '''\n\n Returns\n -------\n pd.DataFrame indexed on terms, with columns the number of documents each term appeared in\n each category\n '''\n # row = self._row_category_ids()\n # newX = csr_matrix(((self._X.data > 0).astype(int), (row, self._X.indices)))\n # return self._ term_freq_df_from_matrix(newX)\n mat = self.get_term_count_mat()\n return pd.DataFrame(mat,\n index=self.get_terms(),\n columns=[str(c) + label_append for c in self.get_categories()])\n\n def get_metadata_doc_count_df(self, label_append=' freq'):\n '''\n\n Returns\n -------\n pd.DataFrame indexed on metadata, with columns the number of documents\n each metadata appeared in each category\n '''\n mat = self.get_metadata_count_mat()\n return pd.DataFrame(mat,\n index=self.get_metadata(),\n columns=[str(c) + label_append for c in self.get_categories()])\n\n def _term_freq_df_from_matrix(self, catX, label_append=' freq'):\n return self._get_freq_df_using_idx_store(catX, self._term_idx_store, label_append=label_append)\n\n def 
_get_freq_df_using_idx_store(self, catX, idx_store, label_append=' freq'):\n d = {'term': idx_store._i2val}\n for idx, cat in self._category_idx_store.items():\n try:\n d[str(cat) + label_append] = catX[idx, :].A[0]\n except IndexError:\n self._fix_problem_when_final_category_index_has_no_terms(cat, catX, d, label_append)\n return pd.DataFrame(d).set_index('term')\n\n def _fix_problem_when_final_category_index_has_no_terms(self, cat, catX, d, label_append=' freq'):\n d[str(cat) + label_append] = np.zeros(catX.shape[1])\n\n def get_metadata_freq_df(self, label_append=' freq'):\n '''\n Parameters\n -------\n label_append : str\n\n Returns\n -------\n pd.DataFrame indexed on metadata, with columns giving frequencies for each category\n '''\n '''\n row = self._row_category_ids_for_meta()\n newX = csr_matrix((self._mX.data, (row, self._mX.indices)))\n return self._metadata_freq_df_from_matrix(newX, label_append)\n '''\n freq_mat = np.zeros(shape=(self.get_num_metadata(), self.get_num_categories()),\n dtype=self.get_metadata_doc_mat().dtype)\n for cat_i in range(self.get_num_categories()):\n freq_mat[:, cat_i] = self._mX[self._y == cat_i, :].sum(axis=0)\n return pd.DataFrame(freq_mat,\n index=pd.Series(self.get_metadata(), name='term'),\n columns=[str(c) + label_append for c in self.get_categories()])\n\n def _row_category_ids(self):\n row = self._X.tocoo().row\n for i, cat in enumerate(self._y):\n row[row == i] = cat\n return row\n\n def _row_category_ids_for_meta(self):\n row = self._mX.tocoo().row\n for i, cat in enumerate(self._y):\n row[row == i] = cat\n return row\n\n def _metadata_freq_df_from_matrix(self, catX, label_append=' freq'):\n return self._get_freq_df_using_idx_store(catX, self._metadata_idx_store, label_append)\n\n def get_category_names_by_row(self):\n '''\n Returns\n -------\n np.array of the category name for each row\n '''\n return np.array(self.get_categories())[self._y]\n\n def _change_document_type_in_matrix(self, X, new_doc_ids):\n new_data = 
self._make_all_positive_data_ones(X.data)\n newX = csr_matrix((new_data, (new_doc_ids, X.indices)))\n return newX\n\n def keep_only_these_categories(self, categories, ignore_absences=False):\n '''\n Non destructive category removal.\n\n Parameters\n ----------\n categories : list\n list of categories to keep\n ignore_absences : bool, False by default\n if categories does not appear, don't raise an error, just move on.\n\n Returns\n -------\n TermDocMatrix, new object with categories removed.\n '''\n if not ignore_absences:\n assert set(self.get_categories()) & set(categories) == set(categories)\n categories_to_remove = [c for c in self.get_categories() if c not in categories]\n return self.remove_categories(categories_to_remove)\n\n def remove_categories(self, categories, ignore_absences=False):\n '''\n Non destructive category removal.\n\n Parameters\n ----------\n categories : list\n list of categories to remove\n ignore_absences : bool, False by default\n if categories does not appear, don't raise an error, just move on.\n\n Returns\n -------\n TermDocMatrix, new object with categories removed.\n '''\n idx_to_delete_list = []\n existing_categories = set(self.get_categories())\n for category in categories:\n if category not in existing_categories:\n if not ignore_absences:\n raise KeyError('Category %s not found' % (category))\n continue\n idx_to_delete_list.append(self._category_idx_store.getidx(category))\n new_category_idx_store = self._category_idx_store.batch_delete_idx(idx_to_delete_list)\n\n columns_to_delete = np.nonzero(np.isin(self._y, idx_to_delete_list))\n new_X = delete_columns(self._X.T, columns_to_delete).T\n new_mX = delete_columns(self._mX.T, columns_to_delete).T\n intermediate_y = self._y[~np.isin(self._y, idx_to_delete_list)]\n old_y_to_new_y = [self._category_idx_store.getidx(x)\n for x in new_category_idx_store._i2val]\n new_y = np.array([old_y_to_new_y.index(i) if i in old_y_to_new_y else None\n for i in range(intermediate_y.max() + 
1)])[intermediate_y]\n\n new_metadata_idx_store = self._metadata_idx_store\n\n if self.metadata_in_use():\n meta_idx_to_delete = np.nonzero(new_mX.sum(axis=0).A1 == 0)[0]\n new_metadata_idx_store = self._metadata_idx_store.batch_delete_idx(meta_idx_to_delete)\n\n term_idx_to_delete = np.nonzero(new_X.sum(axis=0).A1 == 0)[0]\n new_term_idx_store = self._term_idx_store.batch_delete_idx(term_idx_to_delete)\n new_X = delete_columns(new_X, term_idx_to_delete)\n\n term_doc_mat_to_ret = self._make_new_term_doc_matrix(new_X,\n new_mX,\n new_y.astype(int),\n new_term_idx_store,\n new_category_idx_store,\n new_metadata_idx_store,\n ~np.isin(self._y, idx_to_delete_list))\n return term_doc_mat_to_ret\n\n def remove_terms_by_indices(self, idx_to_delete_list, non_text=False):\n '''\n Parameters\n ----------\n idx_to_delete_list, list\n non_text, bool\n\n Returns\n -------\n TermDocMatrix\n '''\n new_X, new_idx_store = self._get_X_after_delete_terms(idx_to_delete_list, non_text)\n return self._make_new_term_doc_matrix(\n new_X=self._X if non_text else new_X,\n new_mX=new_X if non_text else self._mX,\n new_y=self._y,\n new_term_idx_store=self._term_idx_store if non_text else new_idx_store,\n new_category_idx_store=self._category_idx_store,\n new_metadata_idx_store=new_idx_store if non_text else self._metadata_idx_store,\n new_y_mask=self._y == self._y\n )\n\n def change_category_names(self, new_category_names):\n if len(new_category_names) != self.get_num_categories():\n raise Exception(\"The number of category names passed (%s) needs to equal \"\n \"the number of categories in the corpus (%s).\" %\n (len(new_category_names), self.get_num_categories()))\n return self._make_new_term_doc_matrix(\n new_category_idx_store=IndexStoreFromList.build(new_category_names)\n )\n\n def _make_new_term_doc_matrix(self,\n new_X=None,\n new_mX=None,\n new_y=None,\n new_term_idx_store=None,\n new_category_idx_store=None,\n new_metadata_idx_store=None,\n new_y_mask=None):\n X, mX, y = 
self._update_X_mX_y(new_X, new_mX, new_y, new_y_mask)\n return TermDocMatrix(\n X=X,\n mX=mX,\n y=y,\n term_idx_store=new_term_idx_store if new_term_idx_store is not None else self._term_idx_store,\n category_idx_store=new_category_idx_store if new_category_idx_store is not None else self._category_idx_store,\n metadata_idx_store=new_metadata_idx_store if new_metadata_idx_store is not None else self._metadata_idx_store,\n unigram_frequency_path=self._unigram_frequency_path\n )\n\n def _update_X_mX_y(self, new_X, new_mX, new_y, new_y_mask):\n X = new_X if new_X is not None else self._X\n mX = new_mX if new_mX is not None else self._mX\n y = new_y if new_y is not None else self._y\n if new_y_mask is not None:\n if len(y) == len(new_y_mask): # sometimes y is reduced by a calling function\n y = y[new_y_mask]\n if X.shape[0] == len(new_y_mask):\n X = X[new_y_mask, :]\n if mX.shape[0] == len(new_y_mask):\n mX = mX[new_y_mask, :]\n return X, mX, y\n\n def get_posterior_mean_ratio_scores(self, category):\n ''' Computes posterior mean score.\n Parameters\n ----------\n category : str\n category name to score\n\n Returns\n -------\n np.array\n '''\n return self._get_posterior_mean_ratio_from_category(category)\n\n def get_corner_scores(self, category):\n ''' Computes corner score, which is inversely correlated\n to the Rudder score to the nearest upper-left or lower-right corner.\n Parameters\n ----------\n category : str\n category name to score\n\n Returns\n -------\n np.array\n '''\n return CornerScore.get_scores(\n *self._get_catetgory_and_non_category_word_counts(category)\n )\n\n def get_rudder_scores(self, category):\n ''' Computes Rudder score.\n Parameters\n ----------\n category : str\n category name to score\n\n Returns\n -------\n np.array\n '''\n category_percentiles = self._get_term_percentiles_in_category(category)\n not_category_percentiles = self._get_term_percentiles_not_in_category(category)\n rudder_scores = 
self._get_rudder_scores_for_percentile_pair(category_percentiles,\n not_category_percentiles)\n return rudder_scores\n\n def _get_posterior_mean_ratio_from_category(self, category):\n cat_word_counts, not_cat_word_counts = self._get_catetgory_and_non_category_word_counts(category)\n return self._get_posterior_mean_ratio_from_counts(cat_word_counts, not_cat_word_counts)\n\n def _get_posterior_mean_ratio_from_counts(self, cat_word_counts, not_cat_word_counts):\n cat_posterior_mean = self._get_posterior_mean_from_counts(cat_word_counts, not_cat_word_counts)\n not_cat_posterior_mean = self._get_posterior_mean_from_counts(not_cat_word_counts, cat_word_counts)\n return np.log(cat_posterior_mean / not_cat_posterior_mean) / np.log(2)\n\n def _get_posterior_mean_from_counts(self, cat_word_counts, not_cat_word_counts):\n a = cat_word_counts\n b = cat_word_counts.sum() - cat_word_counts\n beta = ((cat_word_counts.sum() + not_cat_word_counts.sum())\n / (cat_word_counts + not_cat_word_counts) - 1)\n posterior_mean = (1. + a) / (1. + a + b + beta)\n return posterior_mean\n\n def get_logistic_regression_coefs_l2(self, category,\n clf=RidgeClassifierCV()):\n ''' Computes l2-penalized logistic regression score.\n Parameters\n ----------\n category : str\n category name to score\n\n category : str\n category name to score\n Returns\n -------\n (coefficient array, accuracy, majority class baseline accuracy)\n '''\n try:\n from sklearn.cross_validation import cross_val_predict\n except:\n from sklearn.model_selection import cross_val_predict\n y = self._get_mask_from_category(category)\n X = TfidfTransformer().fit_transform(self._X)\n clf.fit(X, y)\n y_hat = cross_val_predict(clf, X, y)\n acc, baseline = self._get_accuracy_and_baseline_accuracy(y, y_hat)\n return clf.coef_[0], acc, baseline\n\n def _get_accuracy_and_baseline_accuracy(self, y, y_hat):\n acc = sum(y_hat == y) * 1. / len(y)\n baseline = max([sum(y), len(y) - sum(y)]) * 1. 
/ len(y)\n return acc, baseline\n\n def get_logistic_regression_coefs_l1(self, category,\n clf=LassoCV(alphas=[0.1, 0.001],\n max_iter=10000,\n n_jobs=-1)):\n ''' Computes l1-penalized logistic regression score.\n Parameters\n ----------\n category : str\n category name to score\n\n Returns\n -------\n (coefficient array, accuracy, majority class baseline accuracy)\n '''\n try:\n from sklearn.cross_validation import cross_val_predict\n except:\n from sklearn.model_selection import cross_val_predict\n y = self._get_mask_from_category(category)\n y_continuous = self._get_continuous_version_boolean_y(y)\n # X = TfidfTransformer().fit_transform(self._X)\n X = self._X\n\n clf.fit(X, y_continuous)\n y_hat = (cross_val_predict(clf, X, y_continuous) > 0)\n acc, baseline = self._get_accuracy_and_baseline_accuracy(y, y_hat)\n clf.fit(X, y_continuous)\n return clf.coef_, acc, baseline\n\n def get_regression_coefs(self, category, clf=ElasticNet()):\n ''' Computes regression score of tdfidf transformed features\n Parameters\n ----------\n category : str\n category name to score\n clf : sklearn regressor\n\n Returns\n -------\n coefficient array\n '''\n self._fit_tfidf_model(category, clf)\n return clf.coef_\n\n def get_logreg_coefs(self, category, clf=LogisticRegression()):\n ''' Computes regression score of tdfidf transformed features\n Parameters\n ----------\n category : str\n category name to score\n clf : sklearn regressor\n\n Returns\n -------\n coefficient array\n '''\n self._fit_tfidf_model(category, clf)\n return clf.coef_[0]\n\n def _fit_tfidf_model(self, category, clf):\n y = self._get_mask_from_category(category)\n y_continuous = self._get_continuous_version_boolean_y(y)\n X = TfidfTransformer().fit_transform(self._X)\n clf.fit(X, y_continuous)\n\n def _get_continuous_version_boolean_y(self, y_bool):\n return 1000 * (y_bool * 2. 
- 1)\n\n def get_scaled_f_scores(self,\n category,\n scaler_algo=DEFAULT_SCALER_ALGO,\n beta=DEFAULT_BETA):\n ''' Computes scaled-fscores\n Parameters\n ----------\n category : str\n category name to score\n scaler_algo : str\n Function that scales an array to a range \\in [0 and 1]. Use 'percentile', 'normcdf'. Default.\n beta : float\n Beta in (1+B^2) * (Scale(P(w|c)) * Scale(P(c|w)))/(B^2*Scale(P(w|c)) + Scale(P(c|w))). Default.\n Returns\n -------\n np.array of harmonic means of scaled P(word|category) and scaled P(category|word)\n '''\n\n assert beta > 0\n cat_word_counts, not_cat_word_counts = self._get_catetgory_and_non_category_word_counts(category)\n scores = self._get_scaled_f_score_from_counts(cat_word_counts, not_cat_word_counts, scaler_algo, beta)\n return np.array(scores)\n\n def _get_scaled_f_score_from_counts(self, cat_word_counts, not_cat_word_counts, scaler_algo,\n beta=DEFAULT_BETA):\n '''\n scaler = self._get_scaler_function(scaler_algo)\n p_word_given_category = cat_word_counts.astype(np.float64) / cat_word_counts.sum()\n p_category_given_word = cat_word_counts.astype(np.float64) / (cat_word_counts + not_cat_word_counts)\n scores \\\n = self._computer_harmoic_mean_of_probabilities_over_non_zero_in_category_count_terms(\n cat_word_counts, p_category_given_word, p_word_given_category, scaler\n )\n '''\n return ScaledFScore.get_scores(cat_word_counts, not_cat_word_counts, scaler_algo, beta=beta)\n\n def _computer_harmoic_mean_of_probabilities_over_non_zero_in_category_count_terms(self,\n cat_word_counts,\n p_category_given_word,\n p_word_given_category,\n scaler):\n df = pd.DataFrame({\n 'cat_word_counts': cat_word_counts,\n 'p_word_given_category': p_word_given_category,\n 'p_category_given_word': p_category_given_word\n })\n df_with_count = df[df['cat_word_counts'] > 0]\n df_with_count['scale p_word_given_category'] = scaler(df_with_count['p_word_given_category'])\n df_with_count['scale p_category_given_word'] = 
scaler(df_with_count['p_category_given_word'])\n df['scale p_word_given_category'] = 0\n df.loc[df_with_count.index, 'scale p_word_given_category'] = df_with_count['scale p_word_given_category']\n df['scale p_category_given_word'] = 0\n df.loc[df_with_count.index, 'scale p_category_given_word'] \\\n = df_with_count['scale p_category_given_word']\n score = hmean([df_with_count['scale p_category_given_word'],\n df_with_count['scale p_word_given_category']])\n df['score'] = 0\n df.loc[df_with_count.index, 'score'] = score\n return df['score']\n\n def _get_scaler_function(self, scaler_algo):\n scaler = None\n if scaler_algo == 'percentile':\n scaler = lambda x: rankdata(x).astype(np.float64) / len(x)\n elif scaler_algo == 'normcdf':\n # scaler = lambda x: ECDF(x[cat_word_counts != 0])(x)\n scaler = lambda x: norm.cdf(x, x.mean(), x.std())\n elif scaler_algo == 'none':\n scaler = lambda x: x\n else:\n raise InvalidScalerException(\"Invalid scaler alogrithm. Must be either percentile or normcdf.\")\n return scaler\n\n def get_fisher_scores(self, category):\n cat_word_counts, not_cat_word_counts = self._get_catetgory_and_non_category_word_counts(category)\n return self._get_fisher_scores_from_counts(cat_word_counts, not_cat_word_counts)\n\n def get_fisher_scores_vs_background(self):\n '''\n Returns\n -------\n pd.DataFrame of fisher scores vs background\n '''\n df = self.get_term_and_background_counts()\n odds_ratio, p_values = self._get_fisher_scores_from_counts(\n df['corpus'], df['background'])\n df['Odds ratio'] = odds_ratio\n df['Bonferroni-corrected p-values'] = p_values * len(df)\n df.sort_values(by=['Bonferroni-corrected p-values', 'Odds ratio'],\n ascending=[True, False])\n return df\n\n def get_posterior_mean_ratio_scores_vs_background(self):\n '''\n Returns\n -------\n pd.DataFrame of posterior mean scores vs background\n '''\n df = self.get_term_and_background_counts()\n df['Log Posterior Mean Ratio'] = 
self._get_posterior_mean_ratio_from_counts(df['corpus'],\n df['background'])\n return df.sort_values('Log Posterior Mean Ratio', ascending=False)\n\n def _get_catetgory_and_non_category_word_counts(self, category):\n self._validate_category(category)\n cat_word_counts = self._X[self._get_mask_from_category(category)].sum(axis=0).A1\n not_cat_word_counts = self._X[self._y != self._category_idx_store.getidx(category)].sum(axis=0).A1\n return cat_word_counts, not_cat_word_counts\n\n def _validate_category(self, category):\n if category not in self.get_categories():\n raise Exception(\"Invalid category: %s, valid: %s\" % (category, self.get_categories()))\n\n def _get_fisher_scores_from_counts(self, cat_word_counts, not_cat_word_counts):\n cat_not_word_counts = cat_word_counts.sum() - cat_word_counts\n not_cat_not_word_counts = not_cat_word_counts.sum() - not_cat_word_counts\n\n def do_fisher_exact(x):\n return fisher_exact([[x[0], x[1]], [x[2], x[3]]], alternative='greater')\n\n odds_ratio, p_values = np.apply_along_axis(\n do_fisher_exact,\n 0,\n np.array([cat_word_counts, cat_not_word_counts, not_cat_word_counts, not_cat_not_word_counts]))\n return odds_ratio, p_values\n\n def get_rudder_scores_vs_background(self):\n '''\n Returns\n -------\n pd.DataFrame of rudder scores vs background\n '''\n df = self.get_term_and_background_counts()\n corpus_percentiles = self._get_percentiles_from_freqs(df['corpus'])\n background_percentiles = self._get_percentiles_from_freqs(df['background'])\n df['Rudder'] = (self._get_rudder_scores_for_percentile_pair(corpus_percentiles,\n background_percentiles))\n df = df.sort_values(by='Rudder', ascending=True)\n return df\n\n def _rescale_labels_to_neg_one_pos_one(self, category):\n return (self._get_mask_from_category(category)) * 2 - 1\n\n def _get_rudder_scores_for_percentile_pair(self, category_percentiles, not_category_percentiles):\n return np.linalg.norm(np.array([1, 0])\n - np.array(list(zip(category_percentiles, 
not_category_percentiles))),\n axis=1)\n\n def _get_term_percentiles_in_category(self, category):\n mask = self._get_mask_from_category(category)\n return self._get_frequency_percentiles(mask)\n\n def _get_mask_from_category(self, category):\n return self._y == self._category_idx_store.getidx(category)\n\n def _get_term_percentiles_not_in_category(self, category):\n mask = self._y != self._category_idx_store.getidx(category)\n return self._get_frequency_percentiles(mask)\n\n def _get_frequency_percentiles(self, mask):\n freqs = self._X[mask].sum(axis=0).A1\n percentiles = self._get_percentiles_from_freqs(freqs)\n return percentiles\n\n def _get_percentiles_from_freqs(self, freqs):\n return rankdata(freqs) / len(freqs)\n\n def get_term_category_frequencies(self, scatterchartdata):\n '''\n Applies the ranker in scatterchartdata to term-category frequencies.\n\n Parameters\n ----------\n scatterchartdata : ScatterChartData\n\n Returns\n -------\n pd.DataFrame\n '''\n term_ranker = scatterchartdata.term_ranker(self)\n if scatterchartdata.use_non_text_features:\n term_ranker.use_non_text_features()\n return term_ranker.get_ranks()\n\n def get_category_ids(self):\n '''\n Returns array of category ids\n\n Returns\n -------\n np.array\n '''\n return self._y\n\n def get_category_index_store(self):\n '''\n Returns IndexStore object mapping categories to ids\n\n Returns\n -------\n IndexStore\n '''\n return self._category_idx_store\n\n def recategorize(self, new_categories):\n '''\n Parameters\n ----------\n new_categories : array like\n String names of new categories. 
Length should be equal to number of documents\n\n Returns\n -------\n TermDocMatrix\n '''\n assert len(new_categories) == self.get_num_docs()\n\n new_category_idx_store = IndexStoreFromList.build(set(new_categories))\n new_y = np.array(new_category_idx_store.getidxstrictbatch(new_categories))\n\n new_tdm = self._make_new_term_doc_matrix(self._X, self._mX, new_y, self._term_idx_store, new_category_idx_store,\n self._metadata_idx_store, new_y == new_y)\n return new_tdm\n\n def use_external_metadata_lists(self, metadata_lists):\n '''\n Takes a list of string lists. Each list corresponds to metadata to associate its corresponding document.\n :param metadata: List[List[str]]\n :return: new TermDocMatrix\n '''\n metadata_index_store = IndexStore()\n metadata_csr_factory = CSRMatrixFactory()\n assert len(metadata_lists) == self.get_num_docs()\n for doc_i, metadata_list in enumerate(metadata_lists):\n for metadatum in metadata_list:\n # raise Exception(str(metadatum)\n # + \" \" + str(type(metadatum)) + \" \" + str(len(metadatum)) + str(metadata_list)\n # + \" \" + str(type(metadata_list)) + \" \" + str(len(metadata_list)) + str(metadata_lists))\n # raise Exception(f\"METADATUM {metadatum} \" + metadatum + \":::\" + metadata_list)\n metadata_csr_factory[doc_i, metadata_index_store.getidx(metadatum)] = 1\n\n return self._make_new_term_doc_matrix(\n new_mX=metadata_csr_factory.get_csr_matrix(dtype=int),\n new_metadata_idx_store=metadata_index_store,\n new_y_mask=self._y == self._y\n )\n\n def use_doc_labeled_terms_as_metadata(self, doc_labels, separator='_', replace_metadata = True):\n '''\n Makes the metadata of a new TermDocMatrix a copy of the term-document matrix, except each term is prefixed\n by its document's label followed by the separator.\n\n :param doc_labels: list[str], should be the same size as the number of documents in the TermDocMatrix.\n :param separator: str, default is '_'\n :return: self\n '''\n\n assert len(doc_labels) == self.get_num_docs()\n\n 
doc_labels = np.array(doc_labels)\n\n terms_in_corpus = np.array(self._term_idx_store.values())\n new_metadata_list = []\n new_meta_X = None\n\n ordered_doc_labels = list(sorted(set(doc_labels)))\n X = self._X\n if replace_metadata:\n #X = self._mX\n X = self._X\n\n for doc_label in ordered_doc_labels:\n label_doc_mask = doc_labels == doc_label\n label_X = X[label_doc_mask, :]\n label_term_mask = (X.sum(axis=0) > 0).A1\n label_X = label_X[:, label_term_mask]\n cols_to_pad = len(new_metadata_list)\n\n new_metadata_list += [doc_label + separator + term\n for term in terms_in_corpus[label_term_mask]]\n if new_meta_X is None:\n new_meta_X = label_X\n else:\n label_X_pad = (CSRMatrixFactory()\n .set_last_col_idx(cols_to_pad - 1)\n .set_last_row_idx(sum(label_doc_mask) - 1)\n .get_csr_matrix())\n padded_label_X = scipy.sparse.hstack([label_X_pad, label_X])\n new_meta_X.resize(new_meta_X.shape[0], padded_label_X.shape[1])\n new_meta_X = scipy.sparse.vstack([new_meta_X,\n padded_label_X])\n\n new_metadata_idx_store = IndexStoreFromList.build(new_metadata_list)\n new_meta_X = new_meta_X.tocsr()\n new_mX = (CSRMatrixFactory()\n .set_last_col_idx(new_meta_X.shape[1] - 1)\n .set_last_row_idx(new_meta_X.shape[0] - 1)\n .get_csr_matrix().tolil())\n start_row = 0\n for doc_label in ordered_doc_labels:\n label_doc_mask = doc_labels == doc_label\n num_rows = sum(label_doc_mask)\n new_mX[label_doc_mask, :] = new_meta_X[start_row:start_row + num_rows, :]\n start_row += num_rows\n\n new_mX = new_mX.tocsr()\n new_tdm = self._make_new_term_doc_matrix(self._X,\n new_mX,\n self._y,\n self._term_idx_store,\n self._category_idx_store,\n new_metadata_idx_store,\n self._y == self._y)\n return new_tdm\n\n def use_categories_as_metadata(self):\n '''\n Returns a TermDocMatrix which is identical to self except the metadata values are now identical to the\n categories present.\n\n :return: TermDocMatrix\n '''\n new_metadata_factory = CSRMatrixFactory()\n for i, category_idx in 
enumerate(self.get_category_ids()):\n new_metadata_factory[i, category_idx] = 1\n new_metadata = new_metadata_factory.get_csr_matrix()\n new_tdm = self._make_new_term_doc_matrix(self._X,\n new_metadata,\n self._y,\n self._term_idx_store,\n self._category_idx_store,\n copy(self._category_idx_store),\n self._y == self._y)\n return new_tdm\n\n def use_categories_as_metadata_and_replace_terms(self):\n '''\n Returns a TermDocMatrix which is identical to self except the metadata values are now identical to the\n categories present and term-doc-matrix is now the metadata matrix.\n\n :return: TermDocMatrix\n '''\n new_metadata_factory = CSRMatrixFactory()\n for i, category_idx in enumerate(self.get_category_ids()):\n new_metadata_factory[i, category_idx] = 1\n new_metadata = new_metadata_factory.get_csr_matrix()\n new_tdm = self._make_new_term_doc_matrix(self._mX,\n new_metadata,\n self._y,\n self._metadata_idx_store,\n self._category_idx_store,\n copy(self._category_idx_store),\n self._y == self._y)\n return new_tdm\n\n def copy_terms_to_metadata(self):\n '''\n Returns a TermDocMatrix which is identical to self except the metadata values are now identical to the\n term document matrix.\n\n :return: TermDocMatrix\n '''\n return self._make_new_term_doc_matrix(\n new_mX=copy(self._X),\n new_metadata_idx_store=copy(self._term_idx_store),\n new_y_mask=self._y == self._y\n )\n\n def get_num_categories(self):\n '''\n Returns the number of categories in the term document matrix\n :return: int\n '''\n return len(self.get_categories())\n"
] |
[
[
"numpy.log",
"sklearn.model_selection.cross_val_predict",
"sklearn.linear_model.LogisticRegression",
"scipy.stats.rankdata",
"scipy.stats.fisher_exact",
"sklearn.linear_model.ElasticNet",
"sklearn.linear_model.RidgeClassifierCV",
"scipy.sparse.csr_matrix",
"pandas.DataFrame",
"scipy.stats.hmean",
"sklearn.feature_extraction.text.TfidfTransformer",
"scipy.sparse.vstack",
"sklearn.linear_model.LassoCV",
"scipy.sparse.hstack",
"numpy.array",
"numpy.zeros",
"numpy.isin"
]
] |
[
{
"matplotlib": [],
"numpy": [],
"pandas": [
"0.23",
"0.21",
"2.0",
"1.4",
"0.19",
"1.1",
"1.5",
"1.2",
"0.24",
"0.20",
"1.0",
"0.25",
"1.3"
],
"scipy": [
"1.6",
"1.10",
"1.4",
"1.3",
"1.9",
"0.19",
"1.5",
"0.18",
"1.7",
"1.0",
"0.17",
"1.2",
"1.8"
],
"tensorflow": []
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.